/* busdma_machdep.c — FreeBSD stable/10 ia64, revision 282506 */
133965Sjdp/*-
278828Sobrien * Copyright (c) 1997 Justin T. Gibbs.
3218822Sdim * All rights reserved.
438889Sjdp *
533965Sjdp * Redistribution and use in source and binary forms, with or without
633965Sjdp * modification, are permitted provided that the following conditions
791041Sobrien * are met:
833965Sjdp * 1. Redistributions of source code must retain the above copyright
991041Sobrien *    notice, this list of conditions, and the following disclaimer,
1091041Sobrien *    without modification, immediately at the beginning of the file.
1191041Sobrien * 2. The name of the author may not be used to endorse or promote products
1291041Sobrien *    derived from this software without specific prior written permission.
1333965Sjdp *
1491041Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1591041Sobrien * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1691041Sobrien * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1791041Sobrien * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
1833965Sjdp * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1991041Sobrien * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2091041Sobrien * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21218822Sdim * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2233965Sjdp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23218822Sdim * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2433965Sjdp * SUCH DAMAGE.
2533965Sjdp */
2633965Sjdp
2733965Sjdp#include <sys/cdefs.h>
2833965Sjdp__FBSDID("$FreeBSD: stable/10/sys/ia64/ia64/busdma_machdep.c 282506 2015-05-05 19:47:17Z hselasky $");
2933965Sjdp
3033965Sjdp#include <sys/param.h>
3133965Sjdp#include <sys/systm.h>
3233965Sjdp#include <sys/kernel.h>
33218822Sdim#include <sys/malloc.h>
34218822Sdim#include <sys/lock.h>
35218822Sdim#include <sys/mutex.h>
3633965Sjdp#include <sys/bus.h>
3733965Sjdp#include <sys/interrupt.h>
3833965Sjdp#include <sys/memdesc.h>
3933965Sjdp#include <sys/proc.h>
4033965Sjdp#include <sys/sysctl.h>
4133965Sjdp#include <sys/uio.h>
4233965Sjdp
4333965Sjdp#include <vm/vm.h>
4433965Sjdp#include <vm/vm_page.h>
4533965Sjdp#include <vm/vm_map.h>
4633965Sjdp
47130561Sobrien#include <machine/atomic.h>
48130561Sobrien#include <machine/bus.h>
4933965Sjdp#include <machine/md_var.h>
5033965Sjdp
51130561Sobrien#define	MAX_BPAGES	1024
5233965Sjdp
/*
 * A DMA tag describes the constraints (addressable range, alignment,
 * boundary, segment limits) that a device imposes on DMA transfers.
 * Tags form a chain via 'parent' so child restrictions compose with
 * those of the parent bus.
 */
struct bus_dma_tag {
	bus_dma_tag_t	parent;		/* tag we inherit restrictions from */
	bus_size_t	alignment;	/* required segment alignment */
	bus_addr_t	boundary;	/* boundary a segment may not cross */
	bus_addr_t	lowaddr;	/* exclusion window: bounce above this */
	bus_addr_t	highaddr;	/* ...up to this address */
	bus_dma_filter_t *filter;	/* optional per-address accept filter */
	void		*filterarg;	/* argument passed to 'filter' */
	bus_size_t	maxsize;	/* maximum total mapping size */
	u_int		nsegments;	/* maximum number of S/G segments */
	bus_size_t	maxsegsz;	/* maximum size of one segment */
	int		flags;		/* BUS_DMA_* creation flags */
	int		ref_count;	/* self + child-tag references */
	int		map_count;	/* maps created from this tag */
	bus_dma_lock_t	*lockfunc;	/* driver lock for deferred callbacks */
	void		*lockfuncarg;	/* argument passed to 'lockfunc' */
	bus_dma_segment_t *segments;	/* lazily allocated segment array */
};
7133965Sjdp
/*
 * One page of the bounce pool.  While active it shadows a page of client
 * data that the device cannot address directly; _bus_dmamap_sync() copies
 * between the client page and this buffer.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;	/* pool or per-map list linkage */
};
80130561Sobrien
/*
 * Nonzero when deferred-load callbacks are queued for the busdma software
 * interrupt (handler not visible in this excerpt — confirm in busdma_swi()).
 */
u_int busdma_swi_pending;

/* Protects the bounce-page pool and the map waiting/callback lists. */
static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages in the pool available for use */
static int reserved_bpages;	/* pages reserved for maps but not yet used */
static int active_bpages;	/* pages currently bouncing client data */
static int total_bpages;	/* total pages ever added to the pool */
static int total_bounced;	/* statistic: bounce operations performed */
static int total_deferred;	/* statistic: loads deferred for pages */
9133965Sjdp
9233965Sjdpstatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
9333965SjdpSYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
9433965Sjdp    "Free bounce pages");
95130561SobrienSYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
9633965Sjdp    0, "Reserved bounce pages");
9733965SjdpSYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
9833965Sjdp    "Active bounce pages");
9933965SjdpSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
10033965Sjdp    "Total bounce pages");
10177298SobrienSYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
102130561Sobrien    "Total bounce requests");
10333965SjdpSYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred,
10433965Sjdp    0, "Total bounce requests that were deferred");
10533965Sjdp
/*
 * A DMA map tracks one in-flight load: the bounce pages it holds and,
 * for loads deferred while waiting for bounce pages, enough state to
 * restart the load and invoke the driver callback later.
 */
struct bus_dmamap {
	struct bp_list	bpages;		/* bounce pages held by this map */
	int		pagesneeded;	/* bounce pages this load requires */
	int		pagesreserved;	/* pages reserved toward pagesneeded */
	bus_dma_tag_t	dmat;		/* tag, saved for deferred loads */
	struct memdesc	mem;		/* memory descriptor of deferred load */
	bus_dmamap_callback_t *callback;	/* deferred completion callback */
	void		*callback_arg;	/* argument for 'callback' */
	STAILQ_ENTRY(bus_dmamap) links;	/* waiting/callback list linkage */
};
116218822Sdim
/* Maps blocked waiting for bounce pages, and maps whose deferred
 * callbacks are ready to run from the busdma software interrupt. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Placeholder map used when a tag never needs bouncing. */
static struct bus_dmamap nobounce_dmamap;
120218822Sdim
121218822Sdimstatic void init_bounce_pages(void *dummy);
122218822Sdimstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
123130561Sobrienstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
124130561Sobrien    int commit);
125130561Sobrienstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
126130561Sobrien    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
12733965Sjdpstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
12833965Sjdpstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
129130561Sobrien    bus_size_t len);
13033965Sjdp
13133965Sjdp/*
13233965Sjdp * Return true if a match is made.
13333965Sjdp *
13433965Sjdp * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
13533965Sjdp *
136130561Sobrien * If paddr is within the bounds of the dma tag then call the filter callback
13733965Sjdp * to check for a match, if there is no filter callback then assume a match.
13833965Sjdp */
139130561Sobrienstatic __inline int
14033965Sjdprun_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len)
14133965Sjdp{
14233965Sjdp	bus_addr_t bndy;
14333965Sjdp	int retval;
14433965Sjdp
14533965Sjdp	retval = 0;
146130561Sobrien	bndy = dmat->boundary;
14733965Sjdp	do {
14833965Sjdp		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
14933965Sjdp		    (paddr & (dmat->alignment - 1)) != 0 ||
15033965Sjdp		    (paddr & bndy) != ((paddr + len) & bndy)) &&
15133965Sjdp		    (dmat->filter == NULL ||
15233965Sjdp		    (*dmat->filter)(dmat->filterarg, paddr) != 0))
153130561Sobrien			retval = 1;
15433965Sjdp		dmat = dmat->parent;
15533965Sjdp	} while (retval == 0 && dmat != NULL);
15633965Sjdp	return (retval);
15733965Sjdp}
15833965Sjdp
15933965Sjdp/*
16033965Sjdp * Convenience function for manipulating driver locks from busdma (during
161130561Sobrien * busdma_swi, for example).  Drivers that don't provide their own locks
162130561Sobrien * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
16333965Sjdp * non-mutex locking scheme don't have to use this at all.
164130561Sobrien */
16533965Sjdpvoid
16689857Sobrienbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
16789857Sobrien{
16889857Sobrien	struct mtx *dmtx;
16989857Sobrien
17089857Sobrien	dmtx = (struct mtx *)arg;
17189857Sobrien	switch (op) {
172130561Sobrien	case BUS_DMA_LOCK:
17389857Sobrien		mtx_lock(dmtx);
17433965Sjdp		break;
17589857Sobrien	case BUS_DMA_UNLOCK:
17633965Sjdp		mtx_unlock(dmtx);
17733965Sjdp		break;
17833965Sjdp	default:
179218822Sdim		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
180218822Sdim	}
181218822Sdim}
182218822Sdim
183218822Sdim/*
184218822Sdim * dflt_lock should never get called.  It gets put into the dma tag when
185218822Sdim * lockfunc == NULL, which is only valid if the maps that are associated
186218822Sdim * with the tag are meant to never be defered.
187218822Sdim * XXX Should have a way to identify which driver is responsible here.
188218822Sdim */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	/* Reaching here means a driver deferred a load without a lockfunc. */
	panic("driver error: busdma dflt_lock called");
}
194218822Sdim
195218822Sdim#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
196218822Sdim
197218822Sdim/*
198218822Sdim * Allocate a device specific dma_tag.
199218822Sdim */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking: a segment may never span a boundary. */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the exclusion window edges up to the end of their page. */
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		/* No lockfunc: deferred loads are a driver bug (panics). */
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < paddr_max && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
28833965Sjdp
28933965Sjdpint
29033965Sjdpbus_dma_tag_destroy(bus_dma_tag_t dmat)
29133965Sjdp{
292218822Sdim	if (dmat != NULL) {
293218822Sdim
294218822Sdim		if (dmat->map_count != 0)
295218822Sdim			return (EBUSY);
296218822Sdim
297218822Sdim		while (dmat != NULL) {
298218822Sdim			bus_dma_tag_t parent;
299218822Sdim
300218822Sdim			parent = dmat->parent;
301218822Sdim			atomic_subtract_int(&dmat->ref_count, 1);
302218822Sdim			if (dmat->ref_count == 0) {
303218822Sdim				if (dmat->segments != NULL)
304218822Sdim					free(dmat->segments, M_DEVBUF);
305218822Sdim				free(dmat, M_DEVBUF);
306218822Sdim				/*
307218822Sdim				 * Last reference count, so
308218822Sdim				 * release our reference
309218822Sdim				 * count on our parent.
310218822Sdim				 */
311218822Sdim				dmat = parent;
312218822Sdim			} else
313218822Sdim				dmat = NULL;
314218822Sdim		}
315218822Sdim	}
316218822Sdim	return (0);
317218822Sdim}
318218822Sdim
319218822Sdim/*
320218822Sdim * Allocate a handle for mapping from kva/uva/physical
321218822Sdim * address space into bus device space.
322218822Sdim */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	/* Lazily allocate the per-tag segment array on first map. */
	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->lowaddr < paddr_max) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, atop(paddr_max - dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			/*
			 * A short allocation is fatal only on the first
			 * (minimum) allocation; later growth is best-effort.
			 */
			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		/* No bouncing possible; a NULL map means "no map needed". */
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
38333965Sjdp
38433965Sjdp/*
38533965Sjdp * Destroy a handle for mapping from kva/uva/physical
38633965Sjdp * address space into bus device space.
38733965Sjdp */
38833965Sjdpint
38933965Sjdpbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
39033965Sjdp{
391130561Sobrien
39233965Sjdp	if (map != NULL && map != &nobounce_dmamap) {
393130561Sobrien		if (STAILQ_FIRST(&map->bpages) != NULL)
39433965Sjdp			return (EBUSY);
395130561Sobrien		free(map, M_DEVBUF);
39633965Sjdp	}
397130561Sobrien	dmat->map_count--;
39833965Sjdp	return (0);
39933965Sjdp}
400130561Sobrien
40133965Sjdp
402130561Sobrien/*
40333965Sjdp * Allocate a piece of memory that can be efficiently mapped into
404130561Sobrien * bus device space based on the constraints lited in the dma tag.
40533965Sjdp * A dmamap to for use with dmamap_load is also allocated.
406130561Sobrien */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	/* Lazily allocate the per-tag segment array. */
	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= paddr_max) {
		/* Small, unconstrained allocation: plain malloc suffices. */
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	else if (vtophys(*vaddr) & (dmat->alignment - 1))
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	return (0);
}
46033965Sjdp
46189857Sobrien/*
46233965Sjdp * Free a piece of memory and it's allociated dmamap, that was allocated
46389857Sobrien * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
46433965Sjdp */
46533965Sjdpvoid
46689857Sobrienbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
46733965Sjdp{
46889857Sobrien	/*
46933965Sjdp	 * dmamem does not need to be bounced, so the map should be
47089857Sobrien	 * NULL
47133965Sjdp	 */
47289857Sobrien	if (map != NULL)
47333965Sjdp		panic("bus_dmamem_free: Invalid map freed\n");
47433965Sjdp	if ((dmat->maxsize <= PAGE_SIZE) &&
47589857Sobrien	   (dmat->alignment < dmat->maxsize) &&
47633965Sjdp	    dmat->lowaddr >= paddr_max)
47789857Sobrien		free(vaddr, M_DEVBUF);
47833965Sjdp	else {
47989857Sobrien		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
48033965Sjdp	}
48189857Sobrien}
48233965Sjdp
48333965Sjdpstatic void
48489857Sobrien_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
48533965Sjdp    bus_size_t buflen, int flags)
48689857Sobrien{
48733965Sjdp	bus_addr_t curaddr;
48889857Sobrien	bus_size_t sgsize;
48933965Sjdp
49089857Sobrien	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
49133965Sjdp	    dmat->alignment > 1) && map != &nobounce_dmamap &&
492130561Sobrien	    map->pagesneeded == 0) {
49389857Sobrien		/*
494130561Sobrien		 * Count the number of bounce pages
495130561Sobrien		 * needed in order to complete this transfer
496130561Sobrien		 */
497130561Sobrien		curaddr = buf;
498130561Sobrien		while (buflen != 0) {
499130561Sobrien			sgsize = MIN(buflen, dmat->maxsegsz);
500130561Sobrien			if (run_filter(dmat, curaddr, 0) != 0) {
501130561Sobrien				sgsize = MIN(sgsize, PAGE_SIZE);
502130561Sobrien				map->pagesneeded++;
503130561Sobrien			}
504130561Sobrien			curaddr += sgsize;
505130561Sobrien			buflen -= sgsize;
506130561Sobrien		}
507130561Sobrien	}
508130561Sobrien}
509130561Sobrien
51089857Sobrienstatic void
51189857Sobrien_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
51233965Sjdp    void *buf, bus_size_t buflen, int flags)
51333965Sjdp{
51433965Sjdp	vm_offset_t vaddr;
515130561Sobrien	vm_offset_t vendaddr;
516130561Sobrien	bus_addr_t paddr;
51733965Sjdp
518130561Sobrien	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
51933965Sjdp	    dmat->alignment > 1) && map != &nobounce_dmamap &&
52033965Sjdp	    map->pagesneeded == 0) {
521130561Sobrien		/*
52233965Sjdp		 * Count the number of bounce pages
523130561Sobrien		 * needed in order to complete this transfer
52433965Sjdp		 */
52533965Sjdp		vaddr = trunc_page((vm_offset_t)buf);
52633965Sjdp		vendaddr = (vm_offset_t)buf + buflen;
52733965Sjdp
528130561Sobrien		while (vaddr < vendaddr) {
52933965Sjdp			if (pmap == kernel_pmap)
530130561Sobrien				paddr = pmap_kextract(vaddr);
53133965Sjdp			else
53233965Sjdp				paddr = pmap_extract(pmap, vaddr);
53333965Sjdp			if (run_filter(dmat, paddr, 0) != 0)
53433965Sjdp				map->pagesneeded++;
535130561Sobrien			vaddr += PAGE_SIZE;
53633965Sjdp		}
537130561Sobrien	}
538130561Sobrien}
53933965Sjdp
54033965Sjdpstatic int
54133965Sjdp_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
542130561Sobrien{
54333965Sjdp
544130561Sobrien	/* Reserve Necessary Bounce Pages */
545130561Sobrien	mtx_lock(&bounce_lock);
54633965Sjdp	if (flags & BUS_DMA_NOWAIT) {
54733965Sjdp		if (reserve_bounce_pages(dmat, map, 0) != 0) {
54833965Sjdp			mtx_unlock(&bounce_lock);
549130561Sobrien			return (ENOMEM);
55033965Sjdp		}
551130561Sobrien	} else {
552130561Sobrien		if (reserve_bounce_pages(dmat, map, 1) != 0) {
553130561Sobrien			/* Queue us for resources */
55433965Sjdp			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
55533965Sjdp			    map, links);
55633965Sjdp			mtx_unlock(&bounce_lock);
557130561Sobrien			return (EINPROGRESS);
55833965Sjdp		}
559130561Sobrien	}
560130561Sobrien	mtx_unlock(&bounce_lock);
561130561Sobrien
56233965Sjdp	return (0);
56333965Sjdp}
56433965Sjdp
565130561Sobrien/*
56633965Sjdp * Add a single contiguous physical range to the segment list.
567130561Sobrien */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		/* Clip the chunk at the next boundary crossing. */
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		/* First chunk of the load: start segment 0. */
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		/*
		 * Extend the current segment only if the chunk is physically
		 * contiguous, the combined length fits, and the merge would
		 * not straddle a boundary.
		 */
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			/* Returns 0 when the segment array is exhausted. */
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
61060484Sobrien
61160484Sobrien/*
61260484Sobrien * Utility function to load a physical buffer.  segp contains
61360484Sobrien * the starting segment on entrace, and the ending segment on exit.
61433965Sjdp */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	/* A NULL map stands for "no bouncing needed". */
	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	/* Count and reserve bounce pages before building segments. */
	if (map != &nobounce_dmamap) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		/* Redirect chunks that the device cannot reach directly. */
		if (map->pagesneeded != 0 &&
		    run_filter(dmat, curaddr, sgsize)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
66133965Sjdp
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	/* No machine-specific fast path: fall back to the generic
	 * page-at-a-time loader. */
	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}
671130561Sobrien
672130561Sobrien/*
673130561Sobrien * Utility function to load a linear buffer.  segp contains
674130561Sobrien * the starting segment on entrace, and the ending segment on exit.
675130561Sobrien */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	int error;

	/* A NULL map stands for "no bouncing needed". */
	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	/* Count and reserve bounce pages before building segments. */
	if (map != &nobounce_dmamap) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap)
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * Chunks never cross a page, the maxsegsz limit, or the
		 * end of the buffer.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/* Redirect chunks that the device cannot reach directly. */
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize))
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;

		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
739130561Sobrien
740130561Sobrien
741130561Sobrienvoid
74233965Sjdp__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
74333965Sjdp    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
74433965Sjdp{
74533965Sjdp	if (map != NULL) {
74633965Sjdp		map->dmat = dmat;
74733965Sjdp		map->mem = *mem;
748130561Sobrien		map->callback = callback;
74933965Sjdp		map->callback_arg = callback_arg;
750130561Sobrien	}
751130561Sobrien}
752130561Sobrien
753130561Sobrienbus_dma_segment_t *
754130561Sobrien_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
755130561Sobrien    bus_dma_segment_t *segs, int nsegs, int error)
756130561Sobrien{
757130561Sobrien
758130561Sobrien	if (segs == NULL)
759130561Sobrien		segs = dmat->segments;
76033965Sjdp	return (segs);
76133965Sjdp}
76233965Sjdp
76333965Sjdp/*
76477298Sobrien * Release the mapping held by map.
76577298Sobrien */
766130561Sobrienvoid
76777298Sobrien_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
768130561Sobrien{
76977298Sobrien	struct bounce_page *bpage;
77077298Sobrien
77177298Sobrien	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
77277298Sobrien		STAILQ_REMOVE_HEAD(&map->bpages, links);
77377298Sobrien		free_bounce_page(dmat, bpage);
77477298Sobrien	}
77577298Sobrien}
77677298Sobrien
77777298Sobrienvoid
77877298Sobrien_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
77977298Sobrien{
780130561Sobrien	struct bounce_page *bpage;
78177298Sobrien
78277298Sobrien	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
78377298Sobrien		/*
78477298Sobrien		 * Handle data bouncing.  We might also
785130561Sobrien		 * want to add support for invalidating
786130561Sobrien		 * the caches on broken hardware
78777298Sobrien		 */
788130561Sobrien
789130561Sobrien		if (op & BUS_DMASYNC_PREWRITE) {
79077298Sobrien			while (bpage != NULL) {
79177298Sobrien				if (bpage->datavaddr != 0)
79277298Sobrien					bcopy((void *)bpage->datavaddr,
79377298Sobrien					    (void *)bpage->vaddr,
79477298Sobrien					    bpage->datacount);
79577298Sobrien				else
79677298Sobrien					physcopyout(bpage->dataaddr,
79777298Sobrien					    (void *)bpage->vaddr,
79877298Sobrien					    bpage->datacount);
79977298Sobrien				bpage = STAILQ_NEXT(bpage, links);
80077298Sobrien			}
80177298Sobrien			total_bounced++;
80277298Sobrien		}
80377298Sobrien
80477298Sobrien		if (op & BUS_DMASYNC_POSTREAD) {
80577298Sobrien			while (bpage != NULL) {
80677298Sobrien				if (bpage->datavaddr != 0)
80733965Sjdp					bcopy((void *)bpage->vaddr,
80833965Sjdp					    (void *)bpage->datavaddr,
80933965Sjdp					    bpage->datacount);
810130561Sobrien				else
811130561Sobrien					physcopyin((void *)bpage->vaddr,
812130561Sobrien					    bpage->dataaddr,
813130561Sobrien					    bpage->datacount);
814130561Sobrien				bpage = STAILQ_NEXT(bpage, links);
815130561Sobrien			}
81633965Sjdp			total_bounced++;
817218822Sdim		}
81860484Sobrien	}
819130561Sobrien}
82060484Sobrien
821218822Sdimstatic void
822218822Sdiminit_bounce_pages(void *dummy __unused)
82360484Sobrien{
82460484Sobrien
825130561Sobrien	free_bpages = 0;
82660484Sobrien	reserved_bpages = 0;
82760484Sobrien	active_bpages = 0;
82860484Sobrien	total_bpages = 0;
82989857Sobrien	STAILQ_INIT(&bounce_page_list);
830130561Sobrien	STAILQ_INIT(&bounce_map_waitinglist);
83160484Sobrien	STAILQ_INIT(&bounce_map_callbacklist);
832130561Sobrien	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
83333965Sjdp}
83433965SjdpSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
835130561Sobrien
836130561Sobrienstatic int
837130561Sobrienalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
838130561Sobrien{
839130561Sobrien	int count;
840130561Sobrien
841130561Sobrien	count = 0;
84233965Sjdp	while (numpages > 0) {
84333965Sjdp		struct bounce_page *bpage;
844218822Sdim
845218822Sdim		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
84633965Sjdp		    M_NOWAIT | M_ZERO);
847130561Sobrien		if (bpage == NULL)
848130561Sobrien			break;
849130561Sobrien		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
85033965Sjdp		    M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, dmat->boundary);
85133965Sjdp		if (bpage->vaddr == 0) {
85233965Sjdp			free(bpage, M_DEVBUF);
85333965Sjdp			break;
85433965Sjdp		}
85533965Sjdp		bpage->busaddr = pmap_kextract(bpage->vaddr);
85633965Sjdp		mtx_lock(&bounce_lock);
857130561Sobrien		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
85833965Sjdp		total_bpages++;
859130561Sobrien		free_bpages++;
860130561Sobrien		mtx_unlock(&bounce_lock);
86133965Sjdp		count++;
86233965Sjdp		numpages--;
86333965Sjdp	}
86433965Sjdp	return (count);
865130561Sobrien}
86633965Sjdp
86733965Sjdpstatic int
86833965Sjdpreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
86933965Sjdp{
87033965Sjdp	int pages;
87133965Sjdp
87233965Sjdp	mtx_assert(&bounce_lock, MA_OWNED);
873218822Sdim	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
874218822Sdim	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
875104834Sobrien		return (map->pagesneeded - (map->pagesreserved + pages));
876130561Sobrien	free_bpages -= pages;
877130561Sobrien	reserved_bpages += pages;
878130561Sobrien	map->pagesreserved += pages;
87933965Sjdp	pages = map->pagesneeded - map->pagesreserved;
88033965Sjdp
88133965Sjdp	return (pages);
88233965Sjdp}
88333965Sjdp
88433965Sjdpstatic bus_addr_t
88533965Sjdpadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
88633965Sjdp    bus_addr_t addr, bus_size_t size)
88733965Sjdp{
888130561Sobrien	struct bounce_page *bpage;
889130561Sobrien
890130561Sobrien	KASSERT(map != NULL && map != &nobounce_dmamap,
891130561Sobrien	    ("add_bounce_page: bad map %p", map));
892130561Sobrien
893130561Sobrien	if (map->pagesneeded == 0)
89433965Sjdp		panic("add_bounce_page: map doesn't need any pages");
89533965Sjdp	map->pagesneeded--;
896130561Sobrien
89733965Sjdp	if (map->pagesreserved == 0)
89889857Sobrien		panic("add_bounce_page: map doesn't need any pages");
89989857Sobrien	map->pagesreserved--;
900130561Sobrien
90133965Sjdp	mtx_lock(&bounce_lock);
902130561Sobrien	bpage = STAILQ_FIRST(&bounce_page_list);
90333965Sjdp	if (bpage == NULL)
90433965Sjdp		panic("add_bounce_page: free page list is empty");
90533965Sjdp
90633965Sjdp	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
90733965Sjdp	reserved_bpages--;
90833965Sjdp	active_bpages++;
90933965Sjdp	mtx_unlock(&bounce_lock);
91089857Sobrien
91133965Sjdp	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
91233965Sjdp		/* Page offset needs to be preserved. */
91333965Sjdp		bpage->vaddr |= addr & PAGE_MASK;
91489857Sobrien		bpage->busaddr |= addr & PAGE_MASK;
91533965Sjdp	}
91633965Sjdp	bpage->datavaddr = vaddr;
91760484Sobrien	bpage->dataaddr = addr;
918130561Sobrien	bpage->datacount = size;
91933965Sjdp	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
92060484Sobrien	return (bpage->busaddr);
92160484Sobrien}
92260484Sobrien
92360484Sobrienstatic void
92433965Sjdpfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
92533965Sjdp{
92633965Sjdp	struct bus_dmamap *map;
927130561Sobrien
928130561Sobrien	bpage->datavaddr = 0;
92933965Sjdp	bpage->datacount = 0;
93033965Sjdp	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
93133965Sjdp		/*
932130561Sobrien		 * Reset the bounce page to start at offset 0.  Other uses
93333965Sjdp		 * of this bounce page may need to store a full page of
93433965Sjdp		 * data and/or assume it starts on a page boundary.
93560484Sobrien		 */
93660484Sobrien		bpage->vaddr &= ~PAGE_MASK;
937130561Sobrien		bpage->busaddr &= ~PAGE_MASK;
938130561Sobrien	}
939130561Sobrien
94060484Sobrien	mtx_lock(&bounce_lock);
94160484Sobrien	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
94277298Sobrien	free_bpages++;
94360484Sobrien	active_bpages--;
94460484Sobrien	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
94577298Sobrien		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
94660484Sobrien			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
94777298Sobrien			STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map,
948218822Sdim			    links);
94977298Sobrien			busdma_swi_pending = 1;
950218822Sdim			total_deferred++;
95177298Sobrien			swi_sched(vm_ih, 0);
952218822Sdim		}
95377298Sobrien	}
95460484Sobrien	mtx_unlock(&bounce_lock);
955130561Sobrien}
95660484Sobrien
95760484Sobrienvoid
958130561Sobrienbusdma_swi(void)
95960484Sobrien{
96089857Sobrien	bus_dma_tag_t dmat;
96189857Sobrien	struct bus_dmamap *map;
96289857Sobrien
96391041Sobrien	mtx_lock(&bounce_lock);
96489857Sobrien	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
965130561Sobrien		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
966130561Sobrien		mtx_unlock(&bounce_lock);
967130561Sobrien		dmat = map->dmat;
968130561Sobrien		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
96989857Sobrien		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
97089857Sobrien		    map->callback_arg, BUS_DMA_WAITOK);
97189857Sobrien		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
97289857Sobrien		mtx_lock(&bounce_lock);
97389857Sobrien	}
97489857Sobrien	mtx_unlock(&bounce_lock);
975130561Sobrien}
97691041Sobrien