1178172Simp/*- 2195162Simp * Copyright (c) 2006 Oleksandr Tymoshenko 3178172Simp * All rights reserved. 4178172Simp * 5178172Simp * Redistribution and use in source and binary forms, with or without 6178172Simp * modification, are permitted provided that the following conditions 7178172Simp * are met: 8178172Simp * 1. Redistributions of source code must retain the above copyright 9178172Simp * notice, this list of conditions, and the following disclaimer, 10178172Simp * without modification, immediately at the beginning of the file. 11178172Simp * 2. The name of the author may not be used to endorse or promote products 12178172Simp * derived from this software without specific prior written permission. 13178172Simp * 14178172Simp * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15178172Simp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16178172Simp * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17178172Simp * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18178172Simp * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19178172Simp * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20178172Simp * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21178172Simp * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22178172Simp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23178172Simp * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24178172Simp * SUCH DAMAGE. 
25178172Simp * 26202046Simp * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred 27178172Simp */ 28178172Simp 29178172Simp#include <sys/cdefs.h> 30178172Simp__FBSDID("$FreeBSD$"); 31178172Simp 32202046Simp/* 33202046Simp * MIPS bus dma support routines 34202046Simp */ 35202046Simp 36178172Simp#include <sys/param.h> 37178172Simp#include <sys/systm.h> 38178172Simp#include <sys/malloc.h> 39178172Simp#include <sys/bus.h> 40178172Simp#include <sys/interrupt.h> 41178172Simp#include <sys/lock.h> 42178172Simp#include <sys/proc.h> 43246713Skib#include <sys/memdesc.h> 44178172Simp#include <sys/mutex.h> 45178172Simp#include <sys/ktr.h> 46178172Simp#include <sys/kernel.h> 47202046Simp#include <sys/sysctl.h> 48246713Skib#include <sys/uio.h> 49178172Simp 50178172Simp#include <vm/vm.h> 51178172Simp#include <vm/vm_page.h> 52178172Simp#include <vm/vm_map.h> 53178172Simp 54178172Simp#include <machine/atomic.h> 55178172Simp#include <machine/bus.h> 56178172Simp#include <machine/cache.h> 57178172Simp#include <machine/cpufunc.h> 58204689Sneel#include <machine/cpuinfo.h> 59202046Simp#include <machine/md_var.h> 60178172Simp 61202046Simp#define MAX_BPAGES 64 62202046Simp#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 63202046Simp#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 64202046Simp 65202046Simpstruct bounce_zone; 66202046Simp 67178172Simpstruct bus_dma_tag { 68178172Simp bus_dma_tag_t parent; 69178172Simp bus_size_t alignment; 70232356Sjhb bus_addr_t boundary; 71178172Simp bus_addr_t lowaddr; 72178172Simp bus_addr_t highaddr; 73178172Simp bus_dma_filter_t *filter; 74178172Simp void *filterarg; 75178172Simp bus_size_t maxsize; 76178172Simp u_int nsegments; 77178172Simp bus_size_t maxsegsz; 78178172Simp int flags; 79178172Simp int ref_count; 80178172Simp int map_count; 81178172Simp bus_dma_lock_t *lockfunc; 82178172Simp void *lockfuncarg; 83240177Sjhb bus_dma_segment_t *segments; 84202046Simp struct bounce_zone *bounce_zone; 85178172Simp}; 86178172Simp 87202046Simpstruct bounce_page { 
88202046Simp vm_offset_t vaddr; /* kva of bounce buffer */ 89202046Simp vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */ 90202046Simp bus_addr_t busaddr; /* Physical address */ 91202046Simp vm_offset_t datavaddr; /* kva of client data */ 92246713Skib bus_addr_t dataaddr; /* client physical address */ 93202046Simp bus_size_t datacount; /* client data count */ 94202046Simp STAILQ_ENTRY(bounce_page) links; 95202046Simp}; 96202046Simp 97246713Skibstruct sync_list { 98246713Skib vm_offset_t vaddr; /* kva of bounce buffer */ 99246713Skib bus_addr_t busaddr; /* Physical address */ 100246713Skib bus_size_t datacount; /* client data count */ 101246713Skib}; 102246713Skib 103202046Simpint busdma_swi_pending; 104202046Simp 105202046Simpstruct bounce_zone { 106202046Simp STAILQ_ENTRY(bounce_zone) links; 107202046Simp STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 108202046Simp int total_bpages; 109202046Simp int free_bpages; 110202046Simp int reserved_bpages; 111202046Simp int active_bpages; 112202046Simp int total_bounced; 113202046Simp int total_deferred; 114202046Simp int map_count; 115202046Simp bus_size_t alignment; 116202046Simp bus_addr_t lowaddr; 117202046Simp char zoneid[8]; 118202046Simp char lowaddrid[20]; 119202046Simp struct sysctl_ctx_list sysctl_tree; 120202046Simp struct sysctl_oid *sysctl_tree_top; 121202046Simp}; 122202046Simp 123202046Simpstatic struct mtx bounce_lock; 124202046Simpstatic int total_bpages; 125202046Simpstatic int busdma_zonecount; 126202046Simpstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list; 127202046Simp 128227309Sedstatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 129202046SimpSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 130202046Simp "Total bounce pages"); 131202046Simp 132204689Sneel#define DMAMAP_UNCACHEABLE 0x8 133202046Simp#define DMAMAP_ALLOCATED 0x10 134202046Simp#define DMAMAP_MALLOCUSED 0x20 135202046Simp 136178172Simpstruct bus_dmamap { 
137202046Simp struct bp_list bpages; 138202046Simp int pagesneeded; 139202046Simp int pagesreserved; 140212284Sjchandra bus_dma_tag_t dmat; 141246713Skib struct memdesc mem; 142178172Simp int flags; 143178172Simp void *origbuffer; 144178172Simp void *allocbuffer; 145178172Simp TAILQ_ENTRY(bus_dmamap) freelist; 146202046Simp STAILQ_ENTRY(bus_dmamap) links; 147202046Simp bus_dmamap_callback_t *callback; 148212284Sjchandra void *callback_arg; 149246713Skib int sync_count; 150246713Skib struct sync_list *slist; 151178172Simp}; 152178172Simp 153202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 154202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 155202046Simp 156178172Simpstatic TAILQ_HEAD(,bus_dmamap) dmamap_freelist = 157178172Simp TAILQ_HEAD_INITIALIZER(dmamap_freelist); 158178172Simp 159242466Sadrian#define BUSDMA_STATIC_MAPS 128 160178172Simpstatic struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS]; 161178172Simp 162178172Simpstatic struct mtx busdma_mtx; 163178172Simp 164178172SimpMTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF); 165178172Simp 166202046Simpstatic void init_bounce_pages(void *dummy); 167202046Simpstatic int alloc_bounce_zone(bus_dma_tag_t dmat); 168202046Simpstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 169202046Simpstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 170202046Simp int commit); 171202046Simpstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 172246713Skib vm_offset_t vaddr, bus_addr_t addr, 173246713Skib bus_size_t size); 174202046Simpstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 175202046Simp 176202046Simp/* Default tag, as most drivers provide no parent tag. */ 177202046Simpbus_dma_tag_t mips_root_dma_tag; 178202046Simp 179202046Simp/* 180202046Simp * Return true if a match is made. 181202046Simp * 182202046Simp * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. 
183202046Simp * 184202046Simp * If paddr is within the bounds of the dma tag then call the filter callback 185202046Simp * to check for a match, if there is no filter callback then assume a match. 186202046Simp */ 187202046Simpstatic int 188202046Simprun_filter(bus_dma_tag_t dmat, bus_addr_t paddr) 189202046Simp{ 190202046Simp int retval; 191202046Simp 192202046Simp retval = 0; 193202046Simp 194202046Simp do { 195202046Simp if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) 196202046Simp || ((paddr & (dmat->alignment - 1)) != 0)) 197202046Simp && (dmat->filter == NULL 198202046Simp || (*dmat->filter)(dmat->filterarg, paddr) != 0)) 199202046Simp retval = 1; 200202046Simp 201202046Simp dmat = dmat->parent; 202202046Simp } while (retval == 0 && dmat != NULL); 203202046Simp return (retval); 204202046Simp} 205202046Simp 206178172Simpstatic void 207178172Simpmips_dmamap_freelist_init(void *dummy) 208178172Simp{ 209178172Simp int i; 210178172Simp 211178172Simp for (i = 0; i < BUSDMA_STATIC_MAPS; i++) 212178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist); 213178172Simp} 214178172Simp 215178172SimpSYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL); 216178172Simp 217178172Simp/* 218178172Simp * Check to see if the specified page is in an allowed DMA range. 219178172Simp */ 220178172Simp 221178172Simpstatic __inline int 222202046Simp_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) 223202046Simp{ 224202046Simp int i; 225202046Simp for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { 226202046Simp if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) 227202046Simp || (lowaddr < phys_avail[i] && 228202046Simp highaddr > phys_avail[i])) 229202046Simp return (1); 230202046Simp } 231202046Simp return (0); 232202046Simp} 233202046Simp 234178172Simp/* 235178172Simp * Convenience function for manipulating driver locks from busdma (during 236178172Simp * busdma_swi, for example). 
Drivers that don't provide their own locks 237178172Simp * should specify &Giant to dmat->lockfuncarg. Drivers that use their own 238178172Simp * non-mutex locking scheme don't have to use this at all. 239178172Simp */ 240178172Simpvoid 241178172Simpbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op) 242178172Simp{ 243178172Simp struct mtx *dmtx; 244178172Simp 245178172Simp dmtx = (struct mtx *)arg; 246178172Simp switch (op) { 247178172Simp case BUS_DMA_LOCK: 248178172Simp mtx_lock(dmtx); 249178172Simp break; 250178172Simp case BUS_DMA_UNLOCK: 251178172Simp mtx_unlock(dmtx); 252178172Simp break; 253178172Simp default: 254178172Simp panic("Unknown operation 0x%x for busdma_lock_mutex!", op); 255178172Simp } 256178172Simp} 257178172Simp 258178172Simp/* 259178172Simp * dflt_lock should never get called. It gets put into the dma tag when 260178172Simp * lockfunc == NULL, which is only valid if the maps that are associated 261178172Simp * with the tag are meant to never be defered. 262178172Simp * XXX Should have a way to identify which driver is responsible here. 
263178172Simp */ 264178172Simpstatic void 265178172Simpdflt_lock(void *arg, bus_dma_lock_op_t op) 266178172Simp{ 267178172Simp#ifdef INVARIANTS 268178172Simp panic("driver error: busdma dflt_lock called"); 269178172Simp#else 270178172Simp printf("DRIVER_ERROR: busdma dflt_lock called\n"); 271178172Simp#endif 272178172Simp} 273178172Simp 274178172Simpstatic __inline bus_dmamap_t 275246713Skib_busdma_alloc_dmamap(bus_dma_tag_t dmat) 276178172Simp{ 277246713Skib struct sync_list *slist; 278178172Simp bus_dmamap_t map; 279178172Simp 280246713Skib slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT); 281246713Skib if (slist == NULL) 282246713Skib return (NULL); 283178172Simp mtx_lock(&busdma_mtx); 284178172Simp map = TAILQ_FIRST(&dmamap_freelist); 285178172Simp if (map) 286178172Simp TAILQ_REMOVE(&dmamap_freelist, map, freelist); 287178172Simp mtx_unlock(&busdma_mtx); 288178172Simp if (!map) { 289178172Simp map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); 290178172Simp if (map) 291178172Simp map->flags = DMAMAP_ALLOCATED; 292178172Simp } else 293178172Simp map->flags = 0; 294246713Skib if (map != NULL) { 295246713Skib STAILQ_INIT(&map->bpages); 296246713Skib map->slist = slist; 297246713Skib } else 298246713Skib free(slist, M_DEVBUF); 299178172Simp return (map); 300178172Simp} 301178172Simp 302178172Simpstatic __inline void 303178172Simp_busdma_free_dmamap(bus_dmamap_t map) 304178172Simp{ 305246713Skib free(map->slist, M_DEVBUF); 306178172Simp if (map->flags & DMAMAP_ALLOCATED) 307178172Simp free(map, M_DEVBUF); 308178172Simp else { 309178172Simp mtx_lock(&busdma_mtx); 310178172Simp TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist); 311178172Simp mtx_unlock(&busdma_mtx); 312178172Simp } 313178172Simp} 314178172Simp 315202046Simp/* 316202046Simp * Allocate a device specific dma_tag. 
317202046Simp */ 318202046Simp#define SEG_NB 1024 319202046Simp 320178172Simpint 321178172Simpbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 322232356Sjhb bus_addr_t boundary, bus_addr_t lowaddr, 323212284Sjchandra bus_addr_t highaddr, bus_dma_filter_t *filter, 324212284Sjchandra void *filterarg, bus_size_t maxsize, int nsegments, 325212284Sjchandra bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 326212284Sjchandra void *lockfuncarg, bus_dma_tag_t *dmat) 327178172Simp{ 328178172Simp bus_dma_tag_t newtag; 329178172Simp int error = 0; 330178172Simp /* Return a NULL tag on failure */ 331178172Simp *dmat = NULL; 332202046Simp if (!parent) 333202046Simp parent = mips_root_dma_tag; 334178172Simp 335202046Simp newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); 336178172Simp if (newtag == NULL) { 337178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 338178172Simp __func__, newtag, 0, error); 339178172Simp return (ENOMEM); 340178172Simp } 341178172Simp 342178172Simp newtag->parent = parent; 343178172Simp newtag->alignment = alignment; 344178172Simp newtag->boundary = boundary; 345202046Simp newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); 346202046Simp newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); 347178172Simp newtag->filter = filter; 348178172Simp newtag->filterarg = filterarg; 349212284Sjchandra newtag->maxsize = maxsize; 350212284Sjchandra newtag->nsegments = nsegments; 351178172Simp newtag->maxsegsz = maxsegsz; 352178172Simp newtag->flags = flags; 353204689Sneel if (cpuinfo.cache_coherent_dma) 354204689Sneel newtag->flags |= BUS_DMA_COHERENT; 355178172Simp newtag->ref_count = 1; /* Count ourself */ 356178172Simp newtag->map_count = 0; 357178172Simp if (lockfunc != NULL) { 358178172Simp newtag->lockfunc = lockfunc; 359178172Simp newtag->lockfuncarg = lockfuncarg; 360178172Simp } else { 361178172Simp newtag->lockfunc = dflt_lock; 362178172Simp 
newtag->lockfuncarg = NULL; 363178172Simp } 364240177Sjhb newtag->segments = NULL; 365240177Sjhb 366212284Sjchandra /* 367202046Simp * Take into account any restrictions imposed by our parent tag 368202046Simp */ 369212284Sjchandra if (parent != NULL) { 370232356Sjhb newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); 371232356Sjhb newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); 372178172Simp if (newtag->boundary == 0) 373178172Simp newtag->boundary = parent->boundary; 374178172Simp else if (parent->boundary != 0) 375212284Sjchandra newtag->boundary = 376232356Sjhb MIN(parent->boundary, newtag->boundary); 377202046Simp if ((newtag->filter != NULL) || 378202046Simp ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) 379202046Simp newtag->flags |= BUS_DMA_COULD_BOUNCE; 380212284Sjchandra if (newtag->filter == NULL) { 381212284Sjchandra /* 382212284Sjchandra * Short circuit looking at our parent directly 383212284Sjchandra * since we have encapsulated all of its information 384212284Sjchandra */ 385212284Sjchandra newtag->filter = parent->filter; 386212284Sjchandra newtag->filterarg = parent->filterarg; 387212284Sjchandra newtag->parent = parent->parent; 388178172Simp } 389178172Simp if (newtag->parent != NULL) 390178172Simp atomic_add_int(&parent->ref_count, 1); 391178172Simp } 392202046Simp if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) 393202046Simp || newtag->alignment > 1) 394202046Simp newtag->flags |= BUS_DMA_COULD_BOUNCE; 395178172Simp 396202046Simp if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && 397202046Simp (flags & BUS_DMA_ALLOCNOW) != 0) { 398202046Simp struct bounce_zone *bz; 399202046Simp 400202046Simp /* Must bounce */ 401202046Simp 402202046Simp if ((error = alloc_bounce_zone(newtag)) != 0) { 403202046Simp free(newtag, M_DEVBUF); 404202046Simp return (error); 405202046Simp } 406202046Simp bz = newtag->bounce_zone; 407202046Simp 408202046Simp if (ptoa(bz->total_bpages) < maxsize) { 409202046Simp int pages; 410202046Simp 
411202046Simp pages = atop(maxsize) - bz->total_bpages; 412202046Simp 413202046Simp /* Add pages to our bounce pool */ 414202046Simp if (alloc_bounce_pages(newtag, pages) < pages) 415202046Simp error = ENOMEM; 416202046Simp } 417202046Simp /* Performed initial allocation */ 418202046Simp newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; 419202046Simp } else 420202046Simp newtag->bounce_zone = NULL; 421202046Simp if (error != 0) 422178172Simp free(newtag, M_DEVBUF); 423202046Simp else 424178172Simp *dmat = newtag; 425178172Simp CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 426178172Simp __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); 427202046Simp 428178172Simp return (error); 429178172Simp} 430178172Simp 431178172Simpint 432178172Simpbus_dma_tag_destroy(bus_dma_tag_t dmat) 433178172Simp{ 434178172Simp#ifdef KTR 435178172Simp bus_dma_tag_t dmat_copy = dmat; 436178172Simp#endif 437178172Simp 438178172Simp if (dmat != NULL) { 439212284Sjchandra if (dmat->map_count != 0) 440212284Sjchandra return (EBUSY); 441178172Simp 442212284Sjchandra while (dmat != NULL) { 443212284Sjchandra bus_dma_tag_t parent; 444178172Simp 445212284Sjchandra parent = dmat->parent; 446212284Sjchandra atomic_subtract_int(&dmat->ref_count, 1); 447212284Sjchandra if (dmat->ref_count == 0) { 448240177Sjhb if (dmat->segments != NULL) 449240177Sjhb free(dmat->segments, M_DEVBUF); 450212284Sjchandra free(dmat, M_DEVBUF); 451212284Sjchandra /* 452212284Sjchandra * Last reference count, so 453212284Sjchandra * release our reference 454212284Sjchandra * count on our parent. 
455212284Sjchandra */ 456212284Sjchandra dmat = parent; 457212284Sjchandra } else 458240177Sjhb dmat = NULL; 459212284Sjchandra } 460212284Sjchandra } 461178172Simp CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy); 462178172Simp 463212284Sjchandra return (0); 464178172Simp} 465178172Simp 466202046Simp#include <sys/kdb.h> 467178172Simp/* 468178172Simp * Allocate a handle for mapping from kva/uva/physical 469178172Simp * address space into bus device space. 470178172Simp */ 471178172Simpint 472178172Simpbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 473178172Simp{ 474178172Simp bus_dmamap_t newmap; 475178172Simp int error = 0; 476178172Simp 477240177Sjhb if (dmat->segments == NULL) { 478240177Sjhb dmat->segments = (bus_dma_segment_t *)malloc( 479240177Sjhb sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 480240177Sjhb M_NOWAIT); 481240177Sjhb if (dmat->segments == NULL) { 482240177Sjhb CTR3(KTR_BUSDMA, "%s: tag %p error %d", 483240177Sjhb __func__, dmat, ENOMEM); 484240177Sjhb return (ENOMEM); 485240177Sjhb } 486240177Sjhb } 487240177Sjhb 488246713Skib newmap = _busdma_alloc_dmamap(dmat); 489178172Simp if (newmap == NULL) { 490178172Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); 491178172Simp return (ENOMEM); 492178172Simp } 493178172Simp *mapp = newmap; 494178172Simp newmap->dmat = dmat; 495202046Simp newmap->allocbuffer = NULL; 496246713Skib newmap->sync_count = 0; 497178172Simp dmat->map_count++; 498178172Simp 499202046Simp /* 500202046Simp * Bouncing might be required if the driver asks for an active 501202046Simp * exclusion region, a data alignment that is stricter than 1, and/or 502202046Simp * an active address boundary. 
503202046Simp */ 504202046Simp if (dmat->flags & BUS_DMA_COULD_BOUNCE) { 505202046Simp 506202046Simp /* Must bounce */ 507202046Simp struct bounce_zone *bz; 508202046Simp int maxpages; 509202046Simp 510202046Simp if (dmat->bounce_zone == NULL) { 511202046Simp if ((error = alloc_bounce_zone(dmat)) != 0) { 512202046Simp _busdma_free_dmamap(newmap); 513202046Simp *mapp = NULL; 514202046Simp return (error); 515202046Simp } 516202046Simp } 517202046Simp bz = dmat->bounce_zone; 518202046Simp 519202046Simp /* Initialize the new map */ 520202046Simp STAILQ_INIT(&((*mapp)->bpages)); 521202046Simp 522202046Simp /* 523202046Simp * Attempt to add pages to our pool on a per-instance 524202046Simp * basis up to a sane limit. 525202046Simp */ 526202046Simp maxpages = MAX_BPAGES; 527202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 528202046Simp || (bz->map_count > 0 && bz->total_bpages < maxpages)) { 529202046Simp int pages; 530202046Simp 531202046Simp pages = MAX(atop(dmat->maxsize), 1); 532202046Simp pages = MIN(maxpages - bz->total_bpages, pages); 533202046Simp pages = MAX(pages, 1); 534202046Simp if (alloc_bounce_pages(dmat, pages) < pages) 535202046Simp error = ENOMEM; 536202046Simp 537202046Simp if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { 538202046Simp if (error == 0) 539202046Simp dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; 540202046Simp } else { 541202046Simp error = 0; 542202046Simp } 543202046Simp } 544202046Simp bz->map_count++; 545202046Simp } 546202046Simp 547178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 548178172Simp __func__, dmat, dmat->flags, error); 549178172Simp 550178172Simp return (0); 551178172Simp} 552178172Simp 553178172Simp/* 554178172Simp * Destroy a handle for mapping from kva/uva/physical 555178172Simp * address space into bus device space. 
556178172Simp */ 557178172Simpint 558178172Simpbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 559178172Simp{ 560202046Simp 561246713Skib if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { 562202046Simp CTR3(KTR_BUSDMA, "%s: tag %p error %d", 563202046Simp __func__, dmat, EBUSY); 564202046Simp return (EBUSY); 565202046Simp } 566202046Simp if (dmat->bounce_zone) 567202046Simp dmat->bounce_zone->map_count--; 568178172Simp dmat->map_count--; 569242465Sadrian _busdma_free_dmamap(map); 570178172Simp CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 571178172Simp return (0); 572178172Simp} 573178172Simp 574178172Simp/* 575178172Simp * Allocate a piece of memory that can be efficiently mapped into 576178172Simp * bus device space based on the constraints lited in the dma tag. 577178172Simp * A dmamap to for use with dmamap_load is also allocated. 578178172Simp */ 579178172Simpint 580178172Simpbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 581212284Sjchandra bus_dmamap_t *mapp) 582178172Simp{ 583178172Simp bus_dmamap_t newmap = NULL; 584178172Simp 585178172Simp int mflags; 586178172Simp 587178172Simp if (flags & BUS_DMA_NOWAIT) 588178172Simp mflags = M_NOWAIT; 589178172Simp else 590178172Simp mflags = M_WAITOK; 591240177Sjhb if (dmat->segments == NULL) { 592240177Sjhb dmat->segments = (bus_dma_segment_t *)malloc( 593240177Sjhb sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, 594240177Sjhb mflags); 595240177Sjhb if (dmat->segments == NULL) { 596240177Sjhb CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 597240177Sjhb __func__, dmat, dmat->flags, ENOMEM); 598240177Sjhb return (ENOMEM); 599240177Sjhb } 600240177Sjhb } 601178172Simp if (flags & BUS_DMA_ZERO) 602178172Simp mflags |= M_ZERO; 603178172Simp 604246713Skib newmap = _busdma_alloc_dmamap(dmat); 605178172Simp if (newmap == NULL) { 606178172Simp CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 607178172Simp __func__, dmat, dmat->flags, ENOMEM); 608178172Simp 
return (ENOMEM); 609178172Simp } 610178172Simp dmat->map_count++; 611178172Simp *mapp = newmap; 612178172Simp newmap->dmat = dmat; 613246713Skib newmap->sync_count = 0; 614202046Simp 615204689Sneel /* 616204689Sneel * If all the memory is coherent with DMA then we don't need to 617204689Sneel * do anything special for a coherent mapping request. 618204689Sneel */ 619204689Sneel if (dmat->flags & BUS_DMA_COHERENT) 620204689Sneel flags &= ~BUS_DMA_COHERENT; 621204689Sneel 622204689Sneel /* 623204689Sneel * Allocate uncacheable memory if all else fails. 624204689Sneel */ 625202046Simp if (flags & BUS_DMA_COHERENT) 626204689Sneel newmap->flags |= DMAMAP_UNCACHEABLE; 627204689Sneel 628212284Sjchandra if (dmat->maxsize <= PAGE_SIZE && 629202046Simp (dmat->alignment < dmat->maxsize) && 630202046Simp !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) && 631204689Sneel !(newmap->flags & DMAMAP_UNCACHEABLE)) { 632178172Simp *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); 633202046Simp newmap->flags |= DMAMAP_MALLOCUSED; 634212284Sjchandra } else { 635212284Sjchandra /* 636212284Sjchandra * XXX Use Contigmalloc until it is merged into this facility 637212284Sjchandra * and handles multi-seg allocations. Nobody is doing 638212284Sjchandra * multi-seg allocations yet though. 639212284Sjchandra */ 640212284Sjchandra *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags, 641212284Sjchandra 0ul, dmat->lowaddr, dmat->alignment? 
dmat->alignment : 1ul, 642212284Sjchandra dmat->boundary); 643212284Sjchandra } 644212284Sjchandra if (*vaddr == NULL) { 645178172Simp if (newmap != NULL) { 646178172Simp _busdma_free_dmamap(newmap); 647178172Simp dmat->map_count--; 648178172Simp } 649178172Simp *mapp = NULL; 650212284Sjchandra return (ENOMEM); 651178172Simp } 652202046Simp 653204689Sneel if (newmap->flags & DMAMAP_UNCACHEABLE) { 654178172Simp void *tmpaddr = (void *)*vaddr; 655178172Simp 656178172Simp if (tmpaddr) { 657212283Sjchandra tmpaddr = (void *)pmap_mapdev(vtophys(tmpaddr), 658212283Sjchandra dmat->maxsize); 659178172Simp newmap->origbuffer = *vaddr; 660178172Simp newmap->allocbuffer = tmpaddr; 661178172Simp mips_dcache_wbinv_range((vm_offset_t)*vaddr, 662178172Simp dmat->maxsize); 663178172Simp *vaddr = tmpaddr; 664178172Simp } else 665178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 666202046Simp } else 667178172Simp newmap->origbuffer = newmap->allocbuffer = NULL; 668202046Simp 669212284Sjchandra return (0); 670178172Simp} 671178172Simp 672178172Simp/* 673178172Simp * Free a piece of memory and it's allocated dmamap, that was allocated 674178172Simp * via bus_dmamem_alloc. Make the same choice for free/contigfree. 
675178172Simp */ 676178172Simpvoid 677178172Simpbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 678178172Simp{ 679178172Simp if (map->allocbuffer) { 680178172Simp KASSERT(map->allocbuffer == vaddr, 681178172Simp ("Trying to freeing the wrong DMA buffer")); 682178172Simp vaddr = map->origbuffer; 683178172Simp } 684202046Simp 685212283Sjchandra if (map->flags & DMAMAP_UNCACHEABLE) 686212283Sjchandra pmap_unmapdev((vm_offset_t)map->allocbuffer, dmat->maxsize); 687212284Sjchandra if (map->flags & DMAMAP_MALLOCUSED) 688178172Simp free(vaddr, M_DEVBUF); 689212284Sjchandra else 690178172Simp contigfree(vaddr, dmat->maxsize, M_DEVBUF); 691202046Simp 692178172Simp dmat->map_count--; 693178172Simp _busdma_free_dmamap(map); 694178172Simp CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); 695202046Simp} 696178172Simp 697246713Skibstatic void 698246713Skib_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 699246713Skib bus_size_t buflen, int flags) 700246713Skib{ 701246713Skib bus_addr_t curaddr; 702246713Skib bus_size_t sgsize; 703246713Skib 704246713Skib if ((map->pagesneeded == 0)) { 705246713Skib CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 706246713Skib dmat->lowaddr, dmat->boundary, dmat->alignment); 707246713Skib CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 708246713Skib map, map->pagesneeded); 709246713Skib /* 710246713Skib * Count the number of bounce pages 711246713Skib * needed in order to complete this transfer 712246713Skib */ 713246713Skib curaddr = buf; 714246713Skib while (buflen != 0) { 715246713Skib sgsize = MIN(buflen, dmat->maxsegsz); 716246713Skib if (run_filter(dmat, curaddr) != 0) { 717246713Skib sgsize = MIN(sgsize, PAGE_SIZE); 718246713Skib map->pagesneeded++; 719246713Skib } 720246713Skib curaddr += sgsize; 721246713Skib buflen -= sgsize; 722246713Skib } 723246713Skib CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 724246713Skib } 725246713Skib} 726246713Skib 
727246713Skibstatic void 728202046Simp_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 729202046Simp void *buf, bus_size_t buflen, int flags) 730202046Simp{ 731202046Simp vm_offset_t vaddr; 732202046Simp vm_offset_t vendaddr; 733202046Simp bus_addr_t paddr; 734202046Simp 735202046Simp if ((map->pagesneeded == 0)) { 736202046Simp CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", 737202046Simp dmat->lowaddr, dmat->boundary, dmat->alignment); 738202046Simp CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 739202046Simp map, map->pagesneeded); 740202046Simp /* 741202046Simp * Count the number of bounce pages 742202046Simp * needed in order to complete this transfer 743202046Simp */ 744206405Snwhitehorn vaddr = (vm_offset_t)buf; 745202046Simp vendaddr = (vm_offset_t)buf + buflen; 746202046Simp 747202046Simp while (vaddr < vendaddr) { 748206405Snwhitehorn bus_size_t sg_len; 749206405Snwhitehorn 750202046Simp KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap")); 751206405Snwhitehorn sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); 752202046Simp paddr = pmap_kextract(vaddr); 753202046Simp if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && 754206405Snwhitehorn run_filter(dmat, paddr) != 0) { 755206405Snwhitehorn sg_len = roundup2(sg_len, dmat->alignment); 756202046Simp map->pagesneeded++; 757206405Snwhitehorn } 758206405Snwhitehorn vaddr += sg_len; 759202046Simp } 760202046Simp CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 761202046Simp } 762246713Skib} 763202046Simp 764246713Skibstatic int 765246713Skib_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags) 766246713Skib{ 767246713Skib 768202046Simp /* Reserve Necessary Bounce Pages */ 769246713Skib mtx_lock(&bounce_lock); 770246713Skib if (flags & BUS_DMA_NOWAIT) { 771246713Skib if (reserve_bounce_pages(dmat, map, 0) != 0) { 772246713Skib mtx_unlock(&bounce_lock); 773246713Skib return (ENOMEM); 774202046Simp } 775246713Skib } else { 776246713Skib if 
(reserve_bounce_pages(dmat, map, 1) != 0) { 777246713Skib /* Queue us for resources */ 778246713Skib STAILQ_INSERT_TAIL(&bounce_map_waitinglist, 779246713Skib map, links); 780246713Skib mtx_unlock(&bounce_lock); 781246713Skib return (EINPROGRESS); 782246713Skib } 783202046Simp } 784246713Skib mtx_unlock(&bounce_lock); 785202046Simp 786202046Simp return (0); 787178172Simp} 788178172Simp 789178172Simp/* 790246713Skib * Add a single contiguous physical range to the segment list. 791246713Skib */ 792246713Skibstatic int 793246713Skib_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 794246713Skib bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 795246713Skib{ 796246713Skib bus_addr_t baddr, bmask; 797246713Skib int seg; 798246713Skib 799246713Skib /* 800246713Skib * Make sure we don't cross any boundaries. 801246713Skib */ 802246713Skib bmask = ~(dmat->boundary - 1); 803246713Skib if (dmat->boundary > 0) { 804246713Skib baddr = (curaddr + dmat->boundary) & bmask; 805246713Skib if (sgsize > (baddr - curaddr)) 806246713Skib sgsize = (baddr - curaddr); 807246713Skib } 808246713Skib /* 809246713Skib * Insert chunk into a segment, coalescing with 810246713Skib * the previous segment if possible. 811246713Skib */ 812246713Skib seg = *segp; 813246713Skib if (seg >= 0 && 814246713Skib curaddr == segs[seg].ds_addr + segs[seg].ds_len && 815246713Skib (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && 816246713Skib (dmat->boundary == 0 || 817246713Skib (segs[seg].ds_addr & bmask) == (curaddr & bmask))) { 818246713Skib segs[seg].ds_len += sgsize; 819246713Skib } else { 820246713Skib if (++seg >= dmat->nsegments) 821246713Skib return (0); 822246713Skib segs[seg].ds_addr = curaddr; 823246713Skib segs[seg].ds_len = sgsize; 824246713Skib } 825246713Skib *segp = seg; 826246713Skib return (sgsize); 827246713Skib} 828246713Skib 829246713Skib/* 830246713Skib * Utility function to load a physical buffer. 
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 *
 * Bounces any chunk that fails the tag's address filter; on overflow of
 * the segment list the map is unloaded and EFBIG is returned.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			/* Physical load: no kernel VA, so vaddr is 0. */
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}
/*
 * Utility function to load a linear (kernel-virtual) buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 *
 * Non-bounced chunks are also recorded in map->slist so that
 * _bus_dmamap_sync() can later perform cache maintenance on them.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	struct sync_list *sl;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int error = 0;


	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			/*
			 * Record the chunk in the sync list, extending the
			 * previous entry when virtually contiguous with it.
			 */
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		error = EFBIG; /* XXX better return value here? */
	}
	return (error);
}

/*
 * Record the memory descriptor and callback to use when a deferred load
 * (EINPROGRESS) is later completed from busdma_swi().
 */
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

/*
 * Finish a load: return the segment array the caller should hand to its
 * callback (the tag's own array when none was supplied).
 */
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map: free all bounce pages back to their
 * zone and reset the cache-sync list.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

/*
 * Perform data-cache maintenance over one virtually contiguous chunk for
 * the given sync op.  Partial cache lines at either end are saved before
 * invalidation and restored afterwards so adjacent data is not lost.
 */
static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the ending of the buffer. In order to
	 * prevent a data loss we save these chunks in temporary buffer
	 * before invalidation and restore them after it
	 */
	buf_cl = buf & ~cache_linesize_mask;
	size_cl = buf & cache_linesize_mask;
	buf_clend = buf + len;
	size_clend = (mips_pdcache_linesize -
	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;

	switch (op) {
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:

		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void*)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void*)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void*)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void*)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		/*
		 * NOTE(review): this writes back/invalidates len bytes
		 * starting at the aligned address buf_cl, not at buf; when
		 * buf is unaligned the last size_cl bytes of the buffer may
		 * not be covered unless the cache op rounds its range up
		 * internally -- confirm against mips_dcache_wbinv_range().
		 */
		mips_dcache_wbinv_range(buf_cl, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range(buf, len);
		break;
	}
}

/*
 * Sync the bounce pages of a map: copy data out to the bounce buffers
 * before a write (PREWRITE) and back from them after a read (POSTREAD),
 * with cache writeback/invalidate when the bounce page has no uncached
 * mapping (vaddr_nocache == 0).
 */
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			/*
			 * datavaddr != 0 means the client buffer has a
			 * kernel VA; otherwise copy by physical address.
			 */
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache :
				    bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache :
				    bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

/*
 * bus_dmamap_sync() implementation: handle bounce pages first, then walk
 * the sync list for cache maintenance.  Skipped entirely for POSTWRITE
 * and for coherent or uncacheable mappings, which need no cache ops.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);

	if (dmat->flags & BUS_DMA_COHERENT)
		return;

	if (map->flags & DMAMAP_UNCACHEABLE)
		return;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
	}
}

/*
 * One-time initialization of the bounce-page bookkeeping, run at
 * SI_SUB_LOCK time via SYSINIT below.
 */
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

/* Accessor for a bounce zone's sysctl context. */
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

/* Accessor for a bounce zone's top-level sysctl node. */
static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}
/*
 * Find or create a bounce zone compatible with the tag's alignment and
 * lowaddr constraints, attach it to dmat->bounce_zone, and publish its
 * statistics under the hw.busdma sysctl tree.  Returns 0 on success or
 * ENOMEM when the zone cannot be allocated.
 */
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		/*
		 * Sysctl registration failure is treated as non-fatal:
		 * the zone itself is usable, only its stats are missing.
		 */
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

/*
 * Allocate up to numpages physical bounce pages for the tag's zone, each
 * with an uncached alias mapping.  Returns the number of pages actually
 * added; stops early on allocation failure.
 */
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		/* Page-aligned, below the zone's lowaddr constraint. */
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
		    bz->lowaddr,
		    PAGE_SIZE,
		    0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Move free pages into the map's reservation.  Returns the number of
 * pages still outstanding.  With commit == 0 a partial reservation is
 * refused (nothing beyond what is free is promised); with commit != 0
 * the shortfall is recorded and filled later by free_bounce_page().
 * Caller must hold bounce_lock.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

/*
 * Take one previously reserved bounce page, associate it with the client
 * chunk (vaddr may be 0 for physical loads; addr is the physical source),
 * and return the bus address DMA should use instead.
 */
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

/*
 * Return a bounce page to its zone's free list and, if a map is waiting
 * for pages and can now be fully reserved, move it to the callback list
 * and schedule busdma_swi() to finish its deferred load.
 *
 * NOTE(review): datavaddr/datacount are cleared here but dataaddr is
 * not; appears harmless since add_bounce_page() always overwrites it --
 * confirm.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

/*
 * Software-interrupt handler: complete deferred dmamap loads whose
 * bounce-page reservations have since been satisfied.  bounce_lock is
 * dropped around the load so the callback runs under the tag's own
 * lockfunc, not ours.
 */
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}