busdma_bounce.c revision 259511
1/*-
2 * Copyright (c) 1997, 1998 Justin T. Gibbs.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/10/sys/x86/x86/busdma_bounce.c 259511 2013-12-17 13:39:50Z kib $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/malloc.h>
33#include <sys/bus.h>
34#include <sys/interrupt.h>
35#include <sys/kernel.h>
36#include <sys/ktr.h>
37#include <sys/lock.h>
38#include <sys/proc.h>
39#include <sys/memdesc.h>
40#include <sys/mutex.h>
41#include <sys/sysctl.h>
42#include <sys/uio.h>
43
44#include <vm/vm.h>
45#include <vm/vm_extern.h>
46#include <vm/vm_kern.h>
47#include <vm/vm_page.h>
48#include <vm/vm_map.h>
49
50#include <machine/atomic.h>
51#include <machine/bus.h>
52#include <machine/md_var.h>
53#include <machine/specialreg.h>
54#include <x86/include/busdma_impl.h>
55
56#ifdef __i386__
57#define MAX_BPAGES 512
58#else
59#define MAX_BPAGES 8192
60#endif
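/*
 * Private tag flags (added note): BUS_DMA_COULD_BOUNCE marks a tag whose
 * constraints may require bounce buffering, and BUS_DMA_MIN_ALLOC_COMP
 * marks a tag whose initial bounce page allocation has been performed.
 * Both reuse flag bits reserved for bus back-ends (BUS_DMA_BUS3/BUS_DMA_BUS4).
 */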
61#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
62#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
63
64struct bounce_zone;
65
66struct bus_dma_tag {
67	struct bus_dma_tag_common common;
68	int		  map_count;
69	bus_dma_segment_t *segments;
70	struct bounce_zone *bounce_zone;
71};
72
73struct bounce_page {
74	vm_offset_t	vaddr;		/* kva of bounce buffer */
75	bus_addr_t	busaddr;	/* Physical address */
76	vm_offset_t	datavaddr;	/* kva of client data */
77	bus_addr_t	dataaddr;	/* client physical address */
78	bus_size_t	datacount;	/* client data count */
79	STAILQ_ENTRY(bounce_page) links;
80};
81
82int busdma_swi_pending;
83
84struct bounce_zone {
85	STAILQ_ENTRY(bounce_zone) links;
86	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
87	int		total_bpages;
88	int		free_bpages;
89	int		reserved_bpages;
90	int		active_bpages;
91	int		total_bounced;
92	int		total_deferred;
93	int		map_count;
94	bus_size_t	alignment;
95	bus_addr_t	lowaddr;
96	char		zoneid[8];
97	char		lowaddrid[20];
98	struct sysctl_ctx_list sysctl_tree;
99	struct sysctl_oid *sysctl_tree_top;
100};
101
102static struct mtx bounce_lock;
103static int total_bpages;
104static int busdma_zonecount;
105static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
106
107static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
108SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
109	   "Total bounce pages");
110
111struct bus_dmamap {
112	struct bp_list	       bpages;
113	int		       pagesneeded;
114	int		       pagesreserved;
115	bus_dma_tag_t	       dmat;
116	struct memdesc	       mem;
117	bus_dmamap_callback_t *callback;
118	void		      *callback_arg;
119	STAILQ_ENTRY(bus_dmamap) links;
120};
121
122static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
123static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
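/*
 * Sentinel maps (added note): nobounce_dmamap stands in for a NULL map in
 * the load paths when no bouncing is needed, and contig_dmamap marks memory
 * obtained from bus_dmamem_alloc() via kmem so that the load and free paths
 * can tell it apart from malloc()ed memory.
 */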
124static struct bus_dmamap nobounce_dmamap, contig_dmamap;
125
126static void init_bounce_pages(void *dummy);
127static int alloc_bounce_zone(bus_dma_tag_t dmat);
128static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
129static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
130				int commit);
131static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
132				  vm_offset_t vaddr, bus_addr_t addr,
133				  bus_size_t size);
134static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
135int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
136static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
137				    pmap_t pmap, void *buf, bus_size_t buflen,
138				    int flags);
139static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
140				   vm_paddr_t buf, bus_size_t buflen,
141				   int flags);
142static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
143				     int flags);
144
145#ifdef XEN
146#undef pmap_kextract
147#define pmap_kextract pmap_kextract_ma
148#endif
149
150/*
151 * Allocate a device-specific dma_tag.
152 */
153static int
154bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
155    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
156    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
157    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
158    void *lockfuncarg, bus_dma_tag_t *dmat)
159{
160	bus_dma_tag_t newtag;
161	int error;
162
163	*dmat = NULL;
164	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
165	    NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
166	    maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
167	    sizeof (struct bus_dma_tag), (void **)&newtag);
168	if (error != 0)
169		return (error);
170
171	newtag->common.impl = &bus_dma_bounce_impl;
172	newtag->map_count = 0;
173	newtag->segments = NULL;
174
175	if (parent != NULL && ((newtag->common.filter != NULL) ||
176	    ((parent->common.flags & BUS_DMA_COULD_BOUNCE) != 0)))
177		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
178
179	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
180	    newtag->common.alignment > 1)
181		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
182
183	if (((newtag->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
184	    (flags & BUS_DMA_ALLOCNOW) != 0) {
185		struct bounce_zone *bz;
186
187		/* Must bounce */
188		if ((error = alloc_bounce_zone(newtag)) != 0) {
189			free(newtag, M_DEVBUF);
190			return (error);
191		}
192		bz = newtag->bounce_zone;
193
194		if (ptoa(bz->total_bpages) < maxsize) {
195			int pages;
196
197			pages = atop(maxsize) - bz->total_bpages;
198
199			/* Add pages to our bounce pool */
200			if (alloc_bounce_pages(newtag, pages) < pages)
201				error = ENOMEM;
202		}
203		/* Performed initial allocation */
204		newtag->common.flags |= BUS_DMA_MIN_ALLOC_COMP;
205	} else
206		error = 0;
207
208	if (error != 0)
209		free(newtag, M_DEVBUF);
210	else
211		*dmat = newtag;
212	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
213	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
214	    error);
215	return (error);
216}
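
/*
 * Illustrative sketch (not from this file): drivers reach the routine above
 * through bus_dma_tag_create(9), which dispatches through the common busdma
 * layer.  A hypothetical device limited to 24-bit DMA addressing might
 * create its tag as below; the sizes and the softc fields (sc_mtx, sc_dmat)
 * are assumptions made for the example.
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    DFLTPHYS, 1, DFLTPHYS, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 *
 * On a machine with more than 16MB of RAM, lowaddr is below ptoa(Maxmem),
 * so the resulting tag is flagged BUS_DMA_COULD_BOUNCE and maps created
 * from it may use bounce pages.
 */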
217
218static int
219bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
220{
221	bus_dma_tag_t dmat_copy, parent;
222	int error;
223
224	error = 0;
225	dmat_copy = dmat;
226
227	if (dmat != NULL) {
228		if (dmat->map_count != 0) {
229			error = EBUSY;
230			goto out;
231		}
232		while (dmat != NULL) {
233			parent = (bus_dma_tag_t)dmat->common.parent;
234			atomic_subtract_int(&dmat->common.ref_count, 1);
235			if (dmat->common.ref_count == 0) {
236				if (dmat->segments != NULL)
237					free(dmat->segments, M_DEVBUF);
238				free(dmat, M_DEVBUF);
239				/*
240				 * Last reference, so
241				 * release our reference
242				 * on our parent.
243				 */
244				dmat = parent;
245			} else
246				dmat = NULL;
247		}
248	}
249out:
250	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
251	return (error);
252}
253
254/*
255 * Allocate a handle for mapping from kva/uva/physical
256 * address space into bus device space.
257 */
258static int
259bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
260{
261	struct bounce_zone *bz;
262	int error, maxpages, pages;
263
264	error = 0;
265
266	if (dmat->segments == NULL) {
267		dmat->segments = (bus_dma_segment_t *)malloc(
268		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
269		    M_DEVBUF, M_NOWAIT);
270		if (dmat->segments == NULL) {
271			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
272			    __func__, dmat, ENOMEM);
273			return (ENOMEM);
274		}
275	}
276
277	/*
278	 * Bouncing might be required if the driver asks for an active
279	 * exclusion region, a data alignment that is stricter than 1, and/or
280	 * an active address boundary.
281	 */
282	if (dmat->common.flags & BUS_DMA_COULD_BOUNCE) {
283		/* Must bounce */
284		if (dmat->bounce_zone == NULL) {
285			if ((error = alloc_bounce_zone(dmat)) != 0)
286				return (error);
287		}
288		bz = dmat->bounce_zone;
289
290		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
291		    M_NOWAIT | M_ZERO);
292		if (*mapp == NULL) {
293			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
294			    __func__, dmat, ENOMEM);
295			return (ENOMEM);
296		}
297
298		/* Initialize the new map */
299		STAILQ_INIT(&((*mapp)->bpages));
300
301		/*
302		 * Attempt to add pages to our pool on a per-instance
303		 * basis up to a sane limit.
304		 */
305		if (dmat->common.alignment > 1)
306			maxpages = MAX_BPAGES;
307		else
308			maxpages = MIN(MAX_BPAGES, Maxmem -
309			    atop(dmat->common.lowaddr));
310		if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
311		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
312			pages = MAX(atop(dmat->common.maxsize), 1);
313			pages = MIN(maxpages - bz->total_bpages, pages);
314			pages = MAX(pages, 1);
315			if (alloc_bounce_pages(dmat, pages) < pages)
316				error = ENOMEM;
317			if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP)
318			    == 0) {
319				if (error == 0) {
320					dmat->common.flags |=
321					    BUS_DMA_MIN_ALLOC_COMP;
322				}
323			} else
324				error = 0;
325		}
326		bz->map_count++;
327	} else {
328		*mapp = NULL;
329	}
330	if (error == 0)
331		dmat->map_count++;
332	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
333	    __func__, dmat, dmat->common.flags, error);
334	return (error);
335}
336
337/*
338 * Destroy a handle for mapping from kva/uva/physical
339 * address space into bus device space.
340 */
341static int
342bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
343{
344
345	if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) {
346		if (STAILQ_FIRST(&map->bpages) != NULL) {
347			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
348			    __func__, dmat, EBUSY);
349			return (EBUSY);
350		}
351		if (dmat->bounce_zone)
352			dmat->bounce_zone->map_count--;
353		free(map, M_DEVBUF);
354	}
355	dmat->map_count--;
356	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
357	return (0);
358}
359
360
361/*
362 * Allocate a piece of memory that can be efficiently mapped into
363 * bus device space based on the constraints listed in the dma tag.
364 * A dmamap for use with dmamap_load is also allocated.
365 */
366static int
367bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
368    bus_dmamap_t *mapp)
369{
370	vm_memattr_t attr;
371	int mflags;
372
373	if (flags & BUS_DMA_NOWAIT)
374		mflags = M_NOWAIT;
375	else
376		mflags = M_WAITOK;
377
378	/* If we succeed, no mapping/bouncing will be required */
379	*mapp = NULL;
380
381	if (dmat->segments == NULL) {
382		dmat->segments = (bus_dma_segment_t *)malloc(
383		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
384		    M_DEVBUF, mflags);
385		if (dmat->segments == NULL) {
386			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
387			    __func__, dmat, dmat->common.flags, ENOMEM);
388			return (ENOMEM);
389		}
390	}
391	if (flags & BUS_DMA_ZERO)
392		mflags |= M_ZERO;
393	if (flags & BUS_DMA_NOCACHE)
394		attr = VM_MEMATTR_UNCACHEABLE;
395	else
396		attr = VM_MEMATTR_DEFAULT;
397
398	/*
399	 * XXX:
400	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
401	 * alignment guarantees of malloc need to be nailed down, and the
402	 * code below should be rewritten to take that into account.
403	 *
404	 * In the meantime, we'll warn the user if malloc gets it wrong.
405	 */
406	if ((dmat->common.maxsize <= PAGE_SIZE) &&
407	   (dmat->common.alignment < dmat->common.maxsize) &&
408	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
409	    attr == VM_MEMATTR_DEFAULT) {
410		*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
411	} else if (dmat->common.nsegments >= btoc(dmat->common.maxsize) &&
412	    dmat->common.alignment <= PAGE_SIZE &&
413	    (dmat->common.boundary == 0 ||
414	    dmat->common.boundary >= dmat->common.lowaddr)) {
415		/* Page-based multi-segment allocations allowed */
416		*vaddr = (void *)kmem_alloc_attr(kernel_arena,
417		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
418		    attr);
419		*mapp = &contig_dmamap;
420	} else {
421		*vaddr = (void *)kmem_alloc_contig(kernel_arena,
422		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
423		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
424		    dmat->common.boundary, attr);
425		*mapp = &contig_dmamap;
426	}
427	if (*vaddr == NULL) {
428		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
429		    __func__, dmat, dmat->common.flags, ENOMEM);
430		return (ENOMEM);
431	} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
432		printf("bus_dmamem_alloc failed to align memory properly.\n");
433	}
434	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
435	    __func__, dmat, dmat->common.flags, 0);
436	return (0);
437}
438
439/*
440 * Free a piece of memory and its associated dmamap that was allocated
441 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
442 */
443static void
444bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
445{
446	/*
447	 * dmamem does not need to be bounced, so the map should be
448	 * NULL if malloc() was used and contig_dmamap if
449	 * kmem_alloc_attr() or kmem_alloc_contig() was used.
450	 */
451	if (!(map == NULL || map == &contig_dmamap))
452		panic("bus_dmamem_free: Invalid map freed\n");
453	if (map == NULL)
454		free(vaddr, M_DEVBUF);
455	else
456		kmem_free(kernel_arena, (vm_offset_t)vaddr,
457		    dmat->common.maxsize);
458	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
459	    dmat->common.flags);
460}
461
462static void
463_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
464    bus_size_t buflen, int flags)
465{
466	bus_addr_t curaddr;
467	bus_size_t sgsize;
468
469	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
470		/*
471		 * Count the number of bounce pages
472		 * needed in order to complete this transfer
473		 */
474		curaddr = buf;
475		while (buflen != 0) {
476			sgsize = MIN(buflen, dmat->common.maxsegsz);
477			if (bus_dma_run_filter(&dmat->common, curaddr)) {
478				sgsize = MIN(sgsize, PAGE_SIZE);
479				map->pagesneeded++;
480			}
481			curaddr += sgsize;
482			buflen -= sgsize;
483		}
484		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
485	}
486}
487
488static void
489_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
490    void *buf, bus_size_t buflen, int flags)
491{
492	vm_offset_t vaddr;
493	vm_offset_t vendaddr;
494	bus_addr_t paddr;
495	bus_size_t sg_len;
496
497	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
498		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
499		    "alignment= %d", dmat->common.lowaddr,
500		    ptoa((vm_paddr_t)Maxmem),
501		    dmat->common.boundary, dmat->common.alignment);
502		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
503		    map, &nobounce_dmamap, map->pagesneeded);
504		/*
505		 * Count the number of bounce pages
506		 * needed in order to complete this transfer
507		 */
508		vaddr = (vm_offset_t)buf;
509		vendaddr = (vm_offset_t)buf + buflen;
510
511		while (vaddr < vendaddr) {
512			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
513			if (pmap == kernel_pmap)
514				paddr = pmap_kextract(vaddr);
515			else
516				paddr = pmap_extract(pmap, vaddr);
517			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
518				sg_len = roundup2(sg_len,
519				    dmat->common.alignment);
520				map->pagesneeded++;
521			}
522			vaddr += sg_len;
523		}
524		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
525	}
526}
527
528static int
529_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
530{
531
532	/* Reserve Necessary Bounce Pages */
533	mtx_lock(&bounce_lock);
534	if (flags & BUS_DMA_NOWAIT) {
535		if (reserve_bounce_pages(dmat, map, 0) != 0) {
536			mtx_unlock(&bounce_lock);
537			return (ENOMEM);
538		}
539	} else {
540		if (reserve_bounce_pages(dmat, map, 1) != 0) {
541			/* Queue us for resources */
542			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
543			mtx_unlock(&bounce_lock);
544			return (EINPROGRESS);
545		}
546	}
547	mtx_unlock(&bounce_lock);
548
549	return (0);
550}
551
552/*
553 * Add a single contiguous physical range to the segment list.
554 */
555static int
556_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
557    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
558{
559	bus_addr_t baddr, bmask;
560	int seg;
561
562	/*
563	 * Make sure we don't cross any boundaries.
564	 */
565	bmask = ~(dmat->common.boundary - 1);
566	if (dmat->common.boundary > 0) {
567		baddr = (curaddr + dmat->common.boundary) & bmask;
568		if (sgsize > (baddr - curaddr))
569			sgsize = (baddr - curaddr);
570	}
571
572	/*
573	 * Insert chunk into a segment, coalescing with
574	 * previous segment if possible.
575	 */
576	seg = *segp;
577	if (seg == -1) {
578		seg = 0;
579		segs[seg].ds_addr = curaddr;
580		segs[seg].ds_len = sgsize;
581	} else {
582		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
583		    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
584		    (dmat->common.boundary == 0 ||
585		     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
586			segs[seg].ds_len += sgsize;
587		else {
588			if (++seg >= dmat->common.nsegments)
589				return (0);
590			segs[seg].ds_addr = curaddr;
591			segs[seg].ds_len = sgsize;
592		}
593	}
594	*segp = seg;
595	return (sgsize);
596}
597
598/*
599 * Utility function to load a physical buffer.  segp contains
600 * the starting segment on entrance, and the ending segment on exit.
601 */
602static int
603bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
604    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
605    int *segp)
606{
607	bus_size_t sgsize;
608	bus_addr_t curaddr;
609	int error;
610
611	if (map == NULL || map == &contig_dmamap)
612		map = &nobounce_dmamap;
613
614	if (segs == NULL)
615		segs = dmat->segments;
616
617	if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
618		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
619		if (map->pagesneeded != 0) {
620			error = _bus_dmamap_reserve_pages(dmat, map, flags);
621			if (error)
622				return (error);
623		}
624	}
625
626	while (buflen > 0) {
627		curaddr = buf;
628		sgsize = MIN(buflen, dmat->common.maxsegsz);
629		if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
630		    map->pagesneeded != 0 &&
631		    bus_dma_run_filter(&dmat->common, curaddr)) {
632			sgsize = MIN(sgsize, PAGE_SIZE);
633			curaddr = add_bounce_page(dmat, map, 0, curaddr,
634			    sgsize);
635		}
636		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
637		    segp);
638		if (sgsize == 0)
639			break;
640		buf += sgsize;
641		buflen -= sgsize;
642	}
643
644	/*
645	 * Did we fit?
646	 */
647	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
648}
649
650/*
651 * Utility function to load a linear buffer.  segp contains
652 * the starting segment on entrance, and the ending segment on exit.
653 */
654static int
655bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
656    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
657    int *segp)
658{
659	bus_size_t sgsize, max_sgsize;
660	bus_addr_t curaddr;
661	vm_offset_t vaddr;
662	int error;
663
664	if (map == NULL || map == &contig_dmamap)
665		map = &nobounce_dmamap;
666
667	if (segs == NULL)
668		segs = dmat->segments;
669
670	if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
671		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
672		if (map->pagesneeded != 0) {
673			error = _bus_dmamap_reserve_pages(dmat, map, flags);
674			if (error)
675				return (error);
676		}
677	}
678
679	vaddr = (vm_offset_t)buf;
680	while (buflen > 0) {
681		/*
682		 * Get the physical address for this segment.
683		 */
684		if (pmap == kernel_pmap)
685			curaddr = pmap_kextract(vaddr);
686		else
687			curaddr = pmap_extract(pmap, vaddr);
688
689		/*
690		 * Compute the segment size, and adjust counts.
691		 */
692		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
693		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
694		if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
695		    map->pagesneeded != 0 &&
696		    bus_dma_run_filter(&dmat->common, curaddr)) {
697			sgsize = roundup2(sgsize, dmat->common.alignment);
698			sgsize = MIN(sgsize, max_sgsize);
699			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
700			    sgsize);
701		} else {
702			sgsize = MIN(sgsize, max_sgsize);
703		}
704		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
705		    segp);
706		if (sgsize == 0)
707			break;
708		vaddr += sgsize;
709		buflen -= sgsize;
710	}
711
712	/*
713	 * Did we fit?
714	 */
715	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
716}
717
718static void
719bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
720    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
721{
722
723	if (map == NULL)
724		return;
725	map->mem = *mem;
726	map->dmat = dmat;
727	map->callback = callback;
728	map->callback_arg = callback_arg;
729}
730
731static bus_dma_segment_t *
732bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
733    bus_dma_segment_t *segs, int nsegs, int error)
734{
735
736	if (segs == NULL)
737		segs = dmat->segments;
738	return (segs);
739}
740
741/*
742 * Release the mapping held by map.
743 */
744static void
745bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
746{
747	struct bounce_page *bpage;
748
749	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
750		STAILQ_REMOVE_HEAD(&map->bpages, links);
751		free_bounce_page(dmat, bpage);
752	}
753}
754
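/*
 * Copy data between the client buffer and the bounce pages: PREWRITE copies
 * the client data out to the bounce pages before the device reads them,
 * POSTREAD copies the device's data back to the client buffer afterwards.
 */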
755static void
756bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
757    bus_dmasync_op_t op)
758{
759	struct bounce_page *bpage;
760
761	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
762		/*
763		 * Handle data bouncing.  We might also
764		 * want to add support for invalidating
765		 * the caches on broken hardware.
766		 */
767		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
768		    "performing bounce", __func__, dmat,
769		    dmat->common.flags, op);
770
771		if ((op & BUS_DMASYNC_PREWRITE) != 0) {
772			while (bpage != NULL) {
773				if (bpage->datavaddr != 0) {
774					bcopy((void *)bpage->datavaddr,
775					    (void *)bpage->vaddr,
776					    bpage->datacount);
777				} else {
778					physcopyout(bpage->dataaddr,
779					    (void *)bpage->vaddr,
780					    bpage->datacount);
781				}
782				bpage = STAILQ_NEXT(bpage, links);
783			}
784			dmat->bounce_zone->total_bounced++;
785		}
786
787		if ((op & BUS_DMASYNC_POSTREAD) != 0) {
788			while (bpage != NULL) {
789				if (bpage->datavaddr != 0) {
790					bcopy((void *)bpage->vaddr,
791					    (void *)bpage->datavaddr,
792					    bpage->datacount);
793				} else {
794					physcopyin((void *)bpage->vaddr,
795					    bpage->dataaddr,
796					    bpage->datacount);
797				}
798				bpage = STAILQ_NEXT(bpage, links);
799			}
800			dmat->bounce_zone->total_bounced++;
801		}
802	}
803}
804
805static void
806init_bounce_pages(void *dummy __unused)
807{
808
809	total_bpages = 0;
810	STAILQ_INIT(&bounce_zone_list);
811	STAILQ_INIT(&bounce_map_waitinglist);
812	STAILQ_INIT(&bounce_map_callbacklist);
813	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
814}
815SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
816
817static struct sysctl_ctx_list *
818busdma_sysctl_tree(struct bounce_zone *bz)
819{
820	return (&bz->sysctl_tree);
821}
822
823static struct sysctl_oid *
824busdma_sysctl_tree_top(struct bounce_zone *bz)
825{
826	return (bz->sysctl_tree_top);
827}
828
829#if defined(__amd64__) || defined(PAE)
830#define	SYSCTL_ADD_BUS_SIZE_T	SYSCTL_ADD_UQUAD
831#else
832#define	SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc)	\
833	SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc)
834#endif
835
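/*
 * Find an existing bounce zone that satisfies the tag's lowaddr and
 * alignment constraints, or create a new zone and register its sysctl
 * statistics nodes.
 */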
836static int
837alloc_bounce_zone(bus_dma_tag_t dmat)
838{
839	struct bounce_zone *bz;
840
841	/* Check to see if we already have a suitable zone */
842	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
843		if ((dmat->common.alignment <= bz->alignment) &&
844		    (dmat->common.lowaddr >= bz->lowaddr)) {
845			dmat->bounce_zone = bz;
846			return (0);
847		}
848	}
849
850	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
851	    M_NOWAIT | M_ZERO)) == NULL)
852		return (ENOMEM);
853
854	STAILQ_INIT(&bz->bounce_page_list);
855	bz->free_bpages = 0;
856	bz->reserved_bpages = 0;
857	bz->active_bpages = 0;
858	bz->lowaddr = dmat->common.lowaddr;
859	bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
860	bz->map_count = 0;
861	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
862	busdma_zonecount++;
863	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
864	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
865	dmat->bounce_zone = bz;
866
867	sysctl_ctx_init(&bz->sysctl_tree);
868	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
869	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
870	    CTLFLAG_RD, 0, "");
871	if (bz->sysctl_tree_top == NULL) {
872		sysctl_ctx_free(&bz->sysctl_tree);
873		return (0);	/* XXX error code? */
874	}
875
876	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
877	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
878	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
879	    "Total bounce pages");
880	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
881	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
882	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
883	    "Free bounce pages");
884	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
885	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
886	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
887	    "Reserved bounce pages");
888	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
889	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
890	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
891	    "Active bounce pages");
892	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
893	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
894	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
895	    "Total bounce requests");
896	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
897	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
898	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
899	    "Total bounce requests that were deferred");
900	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
901	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
902	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
903	SYSCTL_ADD_BUS_SIZE_T(busdma_sysctl_tree(bz),
904	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
905	    "alignment", CTLFLAG_RD, &bz->alignment, "");
906
907	return (0);
908}
909
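/*
 * Add up to numpages bounce pages, allocated below the zone's lowaddr, to
 * the tag's bounce zone.  Returns the number of pages actually added.
 */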
910static int
911alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
912{
913	struct bounce_zone *bz;
914	int count;
915
916	bz = dmat->bounce_zone;
917	count = 0;
918	while (numpages > 0) {
919		struct bounce_page *bpage;
920
921		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
922						     M_NOWAIT | M_ZERO);
923
924		if (bpage == NULL)
925			break;
926		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
927							 M_NOWAIT, 0ul,
928							 bz->lowaddr,
929							 PAGE_SIZE,
930							 0);
931		if (bpage->vaddr == 0) {
932			free(bpage, M_DEVBUF);
933			break;
934		}
935		bpage->busaddr = pmap_kextract(bpage->vaddr);
936		mtx_lock(&bounce_lock);
937		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
938		total_bpages++;
939		bz->total_bpages++;
940		bz->free_bpages++;
941		mtx_unlock(&bounce_lock);
942		count++;
943		numpages--;
944	}
945	return (count);
946}
947
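/*
 * Reserve free bounce pages from the zone for a map and return the number
 * of pages still outstanding.  When commit is zero and the map's full
 * requirement cannot be met, nothing is reserved and the shortfall is
 * returned instead.
 */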
948static int
949reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
950{
951	struct bounce_zone *bz;
952	int pages;
953
954	mtx_assert(&bounce_lock, MA_OWNED);
955	bz = dmat->bounce_zone;
956	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
957	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
958		return (map->pagesneeded - (map->pagesreserved + pages));
959	bz->free_bpages -= pages;
960	bz->reserved_bpages += pages;
961	map->pagesreserved += pages;
962	pages = map->pagesneeded - map->pagesreserved;
963
964	return (pages);
965}
966
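/*
 * Take a previously reserved bounce page from the zone, record where the
 * client data lives (kernel virtual and/or physical address) so it can be
 * copied at sync time, and return the bus address that the segment should
 * use instead.
 */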
967static bus_addr_t
968add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
969		bus_addr_t addr, bus_size_t size)
970{
971	struct bounce_zone *bz;
972	struct bounce_page *bpage;
973
974	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
975	KASSERT(map != NULL && map != &nobounce_dmamap && map != &contig_dmamap,
976	    ("add_bounce_page: bad map %p", map));
977
978	bz = dmat->bounce_zone;
979	if (map->pagesneeded == 0)
980		panic("add_bounce_page: map doesn't need any pages");
981	map->pagesneeded--;
982
983	if (map->pagesreserved == 0)
984		panic("add_bounce_page: map doesn't need any pages");
985	map->pagesreserved--;
986
987	mtx_lock(&bounce_lock);
988	bpage = STAILQ_FIRST(&bz->bounce_page_list);
989	if (bpage == NULL)
990		panic("add_bounce_page: free page list is empty");
991
992	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
993	bz->reserved_bpages--;
994	bz->active_bpages++;
995	mtx_unlock(&bounce_lock);
996
997	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
998		/* Page offset needs to be preserved. */
999		bpage->vaddr |= vaddr & PAGE_MASK;
1000		bpage->busaddr |= vaddr & PAGE_MASK;
1001	}
1002	bpage->datavaddr = vaddr;
1003	bpage->dataaddr = addr;
1004	bpage->datacount = size;
1005	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1006	return (bpage->busaddr);
1007}
1008
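/*
 * Return a bounce page to its zone.  If a deferred map is waiting for pages
 * and can now be fully reserved, move it to the callback list and schedule
 * busdma_swi() to finish its load.
 */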
1009static void
1010free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1011{
1012	struct bus_dmamap *map;
1013	struct bounce_zone *bz;
1014
1015	bz = dmat->bounce_zone;
1016	bpage->datavaddr = 0;
1017	bpage->datacount = 0;
1018	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
1019		/*
1020		 * Reset the bounce page to start at offset 0.  Other uses
1021		 * of this bounce page may need to store a full page of
1022		 * data and/or assume it starts on a page boundary.
1023		 */
1024		bpage->vaddr &= ~PAGE_MASK;
1025		bpage->busaddr &= ~PAGE_MASK;
1026	}
1027
1028	mtx_lock(&bounce_lock);
1029	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1030	bz->free_bpages++;
1031	bz->active_bpages--;
1032	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1033		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1034			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1035			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1036			    map, links);
1037			busdma_swi_pending = 1;
1038			bz->total_deferred++;
1039			swi_sched(vm_ih, 0);
1040		}
1041	}
1042	mtx_unlock(&bounce_lock);
1043}
1044
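/*
 * Software interrupt handler that retries deferred loads: each map on the
 * callback list is reloaded with BUS_DMA_WAITOK while holding the tag's
 * lock, which invokes the callback registered at load time.
 */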
1045void
1046busdma_swi(void)
1047{
1048	bus_dma_tag_t dmat;
1049	struct bus_dmamap *map;
1050
1051	mtx_lock(&bounce_lock);
1052	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1053		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1054		mtx_unlock(&bounce_lock);
1055		dmat = map->dmat;
1056		(dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
1057		bus_dmamap_load_mem(map->dmat, map, &map->mem,
1058		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
1059		(dmat->common.lockfunc)(dmat->common.lockfuncarg,
1060		    BUS_DMA_UNLOCK);
1061		mtx_lock(&bounce_lock);
1062	}
1063	mtx_unlock(&bounce_lock);
1064}
1065
1066struct bus_dma_impl bus_dma_bounce_impl = {
1067	.tag_create = bounce_bus_dma_tag_create,
1068	.tag_destroy = bounce_bus_dma_tag_destroy,
1069	.map_create = bounce_bus_dmamap_create,
1070	.map_destroy = bounce_bus_dmamap_destroy,
1071	.mem_alloc = bounce_bus_dmamem_alloc,
1072	.mem_free = bounce_bus_dmamem_free,
1073	.load_phys = bounce_bus_dmamap_load_phys,
1074	.load_buffer = bounce_bus_dmamap_load_buffer,
1075	.load_ma = bus_dmamap_load_ma_triv,
1076	.map_waitok = bounce_bus_dmamap_waitok,
1077	.map_complete = bounce_bus_dmamap_complete,
1078	.map_unload = bounce_bus_dmamap_unload,
1079	.map_sync = bounce_bus_dmamap_sync
1080};
1081