subr_rman.c revision 133177
1252602Spjd/*
2252602Spjd * Copyright 1998 Massachusetts Institute of Technology
3252603Spjd *
4252602Spjd * Permission to use, copy, modify, and distribute this software and
51553Srgrimes * its documentation for any purpose and without fee is hereby
61553Srgrimes * granted, provided that both the above copyright notice and this
71553Srgrimes * permission notice appear in all copies, that both the above
81553Srgrimes * copyright notice and this permission notice appear in all
91553Srgrimes * supporting documentation, and that the name of M.I.T. not be used
101553Srgrimes * in advertising or publicity pertaining to distribution of the
111553Srgrimes * software without specific, written prior permission.  M.I.T. makes
121553Srgrimes * no representations about the suitability of this software for any
131553Srgrimes * purpose.  It is provided "as is" without express or implied
141553Srgrimes * warranty.
151553Srgrimes *
161553Srgrimes * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
171553Srgrimes * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
181553Srgrimes * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
191553Srgrimes * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
201553Srgrimes * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
211553Srgrimes * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
221553Srgrimes * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
231553Srgrimes * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
241553Srgrimes * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
251553Srgrimes * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
261553Srgrimes * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
271553Srgrimes * SUCH DAMAGE.
281553Srgrimes */
291553Srgrimes
301553Srgrimes/*
311553Srgrimes * The kernel resource manager.  This code is responsible for keeping track
3230380Scharnier * of hardware resources which are apportioned out to various drivers.
331553Srgrimes * It does not actually assign those resources, and it is not expected
341553Srgrimes * that end-device drivers will call into this code directly.  Rather,
351553Srgrimes * the code which implements the buses that those devices are attached to,
361553Srgrimes * and the code which manages CPU resources, will call this code, and the
37117278Scharnier * end-device drivers will make upcalls to that code to actually perform
381553Srgrimes * the allocation.
391553Srgrimes *
40117278Scharnier * There are two sorts of resources managed by this code.  The first is
4130380Scharnier * the more familiar array (RMAN_ARRAY) type; resources in this class
421553Srgrimes * consist of a sequence of individually-allocatable objects which have
43117278Scharnier * been numbered in some well-defined order.  Most of the resources
44117278Scharnier * are of this type, as it is the most familiar.  The second type is
45117278Scharnier * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
46312033Sngie * resources in which each instance is indistinguishable from every
47263234Srwatson * other instance).  The principal anticipated application of gauges
48312033Sngie * is in the context of power consumption, where a bus may have a specific
49312033Sngie * power budget which all attached devices share.  RMAN_GAUGE is not
501553Srgrimes * implemented yet.
511553Srgrimes *
521553Srgrimes * For array resources, we make one simplifying assumption: two clients
531553Srgrimes * sharing the same resource must use the same range of indices.  That
54252603Spjd * is to say, sharing of overlapping-but-not-identical regions is not
551553Srgrimes * permitted.
561553Srgrimes */
571553Srgrimes
581553Srgrimes#include <sys/cdefs.h>
591553Srgrimes__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 133177 2004-08-05 15:48:18Z jhb $");
6070284Siedowse
611553Srgrimes#define __RMAN_RESOURCE_VISIBLE
621553Srgrimes#include <sys/param.h>
631553Srgrimes#include <sys/systm.h>
6430380Scharnier#include <sys/kernel.h>
651553Srgrimes#include <sys/lock.h>
661553Srgrimes#include <sys/malloc.h>
67252602Spjd#include <sys/mutex.h>
681553Srgrimes#include <sys/bus.h>		/* XXX debugging */
691553Srgrimes#include <machine/bus.h>
70252602Spjd#include <sys/rman.h>
711553Srgrimes#include <sys/sysctl.h>
721553Srgrimes
/* Debug knob: settable as loader tunable debug.rman_debug or via sysctl. */
int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

/* Debug printf: always compiled in, emitted only when rman_debug != 0. */
#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

/* Global list of all rmans in the system, protected by rman_mtx. */
struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
/* Internal workers: callers must hold the owning rman's rm_mtx. */
static	int int_rman_activate_resource(struct rman *rm, struct resource *r,
				       struct resource **whohas);
static	int int_rman_deactivate_resource(struct resource *r);
static	int int_rman_release_resource(struct rman *rm, struct resource *r);
8810087Sjkh
/*
 * Initialize an rman whose identifying fields (notably rm_type) the
 * caller has already filled in; RMAN_UNINIT or the unimplemented
 * RMAN_GAUGE type is a fatal error.  Allocates the per-rman mutex and
 * links the rman onto the global rman_head list.
 * Returns 0 on success or ENOMEM.
 */
int
rman_init(struct rman *rm)
{
	static int once;

	/*
	 * First-call setup of the global list and its mutex.
	 * NOTE(review): `once' is not itself synchronized; presumably the
	 * first call happens single-threaded during boot -- confirm.
	 */
	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	/* M_NOWAIT: may be called from contexts that cannot sleep. */
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == 0)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}
116252602Spjd
/*
 * Add the index range [start, end] to the set managed by `rm'.  The
 * region list is kept sorted by ending index, so the new region is
 * inserted before the first existing region that ends at or after its
 * start.  Returns 0 on success or ENOMEM.
 *
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
	if (r == 0)
		return ENOMEM;
	/* M_ZERO leaves r_flags clear, i.e. the region is unallocated. */
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);
	/* Find the first region ending at or after our start. */
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		TAILQ_INSERT_BEFORE(s, r, r_link);
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}
148252602Spjd
/*
 * Tear down an rman.  Fails with EBUSY if any of its regions is still
 * allocated; otherwise frees all regions, unlinks the rman from the
 * global list, and destroys its mutex.  Returns 0 or EBUSY.
 */
int
rman_fini(struct rman *rm)
{
	struct resource *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	/* Drop rm_mtx before destroying it; rman_mtx guards the unlink. */
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}
18099825Salfred
18117829Spststruct resource *
18217829Spstrman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
1831553Srgrimes		      u_long count, u_long bound,  u_int flags,
184252602Spjd		      struct device *dev)
18530380Scharnier{
18630380Scharnier	u_int	want_activate;
18717829Spst	struct	resource *r, *s, *rv;
18817829Spst	u_long	rstart, rend, amask, bmask;
18917829Spst
190252602Spjd	rv = 0;
191252602Spjd
19210087Sjkh	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
19310087Sjkh	       "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
19410087Sjkh	       flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
195252602Spjd	want_activate = (flags & RF_ACTIVE);
196252602Spjd	flags &= ~RF_ACTIVE;
19710087Sjkh
19810087Sjkh	mtx_lock(rm->rm_mtx);
199252602Spjd
20030380Scharnier	for (r = TAILQ_FIRST(&rm->rm_list);
201252602Spjd	     r && r->r_end < start;
202252602Spjd	     r = TAILQ_NEXT(r, r_link))
203252602Spjd		;
204252602Spjd
20510087Sjkh	if (r == NULL) {
206252602Spjd		DPRINTF(("could not find a region\n"));
20742508Ssteve		goto out;
208252602Spjd	}
20942508Ssteve
210252602Spjd	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
21147963Sbrian	/* If bound is 0, bmask will also be 0 */
212252602Spjd	bmask = ~(bound - 1);
21330380Scharnier	/*
214252602Spjd	 * First try to find an acceptable totally-unshared region.
215252602Spjd	 */
216252602Spjd	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
21710087Sjkh		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
21830380Scharnier		if (s->r_start > end) {
21930380Scharnier			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end));
2201553Srgrimes			break;
2211553Srgrimes		}
2221553Srgrimes		if (s->r_flags & RF_ALLOCATED) {
22310087Sjkh			DPRINTF(("region is allocated\n"));
224252605Spjd			continue;
22510087Sjkh		}
22610087Sjkh		rstart = ulmax(s->r_start, start);
227117278Scharnier		/*
22810087Sjkh		 * Try to find a region by adjusting to boundary and alignment
22910087Sjkh		 * until both conditions are satisfied. This is not an optimal
2301553Srgrimes		 * algorithm, but in most cases it isn't really bad, either.
23153770Scharnier		 */
2321553Srgrimes		do {
2331553Srgrimes			rstart = (rstart + amask) & ~amask;
2341553Srgrimes			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
2351553Srgrimes				rstart += bound - (rstart & ~bmask);
2361553Srgrimes		} while ((rstart & amask) != 0 && rstart < end &&
2371553Srgrimes		    rstart < s->r_end);
2381553Srgrimes		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
2391553Srgrimes		if (rstart > rend) {
2401553Srgrimes			DPRINTF(("adjusted start exceeds end\n"));
241229403Sed			continue;
2421553Srgrimes		}
243252602Spjd		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
2441553Srgrimes		       rstart, rend, (rend - rstart + 1), count));
2451553Srgrimes
2461553Srgrimes		if ((rend - rstart + 1) >= count) {
2471553Srgrimes			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
2481553Srgrimes			       rend, rstart, (rend - rstart + 1)));
2491553Srgrimes			if ((s->r_end - s->r_start + 1) == count) {
2501553Srgrimes				DPRINTF(("candidate region is entire chunk\n"));
2511553Srgrimes				rv = s;
2521553Srgrimes				rv->r_flags |= RF_ALLOCATED | flags;
25399825Salfred				rv->r_dev = dev;
25499825Salfred				goto out;
25599825Salfred			}
25699825Salfred
25799825Salfred			/*
2581553Srgrimes			 * If s->r_start < rstart and
2591553Srgrimes			 *    s->r_end > rstart + count - 1, then
2601553Srgrimes			 * we need to split the region into three pieces
261220969Ssimon			 * (the middle one will get returned to the user).
262220969Ssimon			 * Otherwise, we are allocating at either the
263220969Ssimon			 * beginning or the end of s, so we only need to
264220969Ssimon			 * split it in two.  The first case requires
265252602Spjd			 * two new allocations; the second requires but one.
266220969Ssimon			 */
267220969Ssimon			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
268220969Ssimon			if (rv == 0)
269220969Ssimon				goto out;
270220969Ssimon			rv->r_start = rstart;
271220969Ssimon			rv->r_end = rstart + count - 1;
272220969Ssimon			rv->r_flags = flags | RF_ALLOCATED;
2731553Srgrimes			rv->r_dev = dev;
2741553Srgrimes			rv->r_rm = rm;
27542508Ssteve
276252603Spjd			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
277252603Spjd				DPRINTF(("splitting region in three parts: "
278252603Spjd				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
279252603Spjd				       s->r_start, rv->r_start - 1,
280252603Spjd				       rv->r_start, rv->r_end,
281252603Spjd				       rv->r_end + 1, s->r_end));
282255227Spjd				/*
283255227Spjd				 * We are allocating in the middle.
284258768Spjd				 */
285255227Spjd				r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
286255227Spjd				if (r == 0) {
287255227Spjd					free(rv, M_RMAN);
288252603Spjd					rv = 0;
289252603Spjd					goto out;
290252603Spjd				}
291252603Spjd				r->r_start = rv->r_end + 1;
29242508Ssteve				r->r_end = s->r_end;
293252603Spjd				r->r_flags = s->r_flags;
294252603Spjd				r->r_rm = rm;
295252603Spjd				s->r_end = rv->r_start - 1;
296252603Spjd				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
297252603Spjd						     r_link);
298252603Spjd				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
299252603Spjd						     r_link);
300252603Spjd			} else if (s->r_start == rv->r_start) {
301252603Spjd				DPRINTF(("allocating from the beginning\n"));
302252603Spjd				/*
303252603Spjd				 * We are allocating at the beginning.
304252603Spjd				 */
305252603Spjd				s->r_start = rv->r_end + 1;
306252603Spjd				TAILQ_INSERT_BEFORE(s, rv, r_link);
307252603Spjd			} else {
308252603Spjd				DPRINTF(("allocating at the end\n"));
309252603Spjd				/*
310252603Spjd				 * We are allocating at the end.
311252603Spjd				 */
312252603Spjd				s->r_end = rv->r_start - 1;
313252603Spjd				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
314252603Spjd						     r_link);
315252603Spjd			}
316252603Spjd			goto out;
317252603Spjd		}
318252603Spjd	}
319252603Spjd
320252603Spjd	/*
321252603Spjd	 * Now find an acceptable shared region, if the client's requirements
322252603Spjd	 * allow sharing.  By our implementation restriction, a candidate
323252603Spjd	 * region must match exactly by both size and sharing type in order
324252603Spjd	 * to be considered compatible with the client's request.  (The
325252603Spjd	 * former restriction could probably be lifted without too much
326252603Spjd	 * additional work, but this does not seem warranted.)
327252603Spjd	 */
328252603Spjd	DPRINTF(("no unshared regions found\n"));
329252603Spjd	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
330252603Spjd		goto out;
331252603Spjd
332252603Spjd	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
333252603Spjd		if (s->r_start > end)
334252603Spjd			break;
335252603Spjd		if ((s->r_flags & flags) != flags)
336254486Spjd			continue;
337254486Spjd		rstart = ulmax(s->r_start, start);
338254486Spjd		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
339252603Spjd		if (s->r_start >= start && s->r_end <= end
340254486Spjd		    && (s->r_end - s->r_start + 1) == count &&
341252603Spjd		    (s->r_start & amask) == 0 &&
342252603Spjd		    ((s->r_start ^ s->r_end) & bmask) == 0) {
343252603Spjd			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
344252603Spjd			if (rv == 0)
345252603Spjd				goto out;
346252603Spjd			rv->r_start = s->r_start;
347252603Spjd			rv->r_end = s->r_end;
348252603Spjd			rv->r_flags = s->r_flags &
349252603Spjd				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
350252603Spjd			rv->r_dev = dev;
351252603Spjd			rv->r_rm = rm;
352252603Spjd			if (s->r_sharehead == 0) {
353255219Spjd				s->r_sharehead = malloc(sizeof *s->r_sharehead,
354252603Spjd						M_RMAN, M_NOWAIT | M_ZERO);
355252605Spjd				if (s->r_sharehead == 0) {
356252603Spjd					free(rv, M_RMAN);
357252603Spjd					rv = 0;
358252603Spjd					goto out;
359252603Spjd				}
360252603Spjd				LIST_INIT(s->r_sharehead);
361252603Spjd				LIST_INSERT_HEAD(s->r_sharehead, s,
362252605Spjd						 r_sharelink);
363252605Spjd				s->r_flags |= RF_FIRSTSHARE;
364252605Spjd			}
365252605Spjd			rv->r_sharehead = s->r_sharehead;
366252605Spjd			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
367255219Spjd			goto out;
368255219Spjd		}
369255219Spjd	}
370252605Spjd
371252605Spjd	/*
372252605Spjd	 * We couldn't find anything.
373252605Spjd	 */
374252605Spjdout:
375252605Spjd	/*
376252605Spjd	 * If the user specified RF_ACTIVE in the initial flags,
3771553Srgrimes	 * which is reflected in `want_activate', we attempt to atomically
378252602Spjd	 * activate the resource.  If this fails, we release the resource
379252602Spjd	 * and indicate overall failure.  (This behavior probably doesn't
3801553Srgrimes	 * make sense for RF_TIMESHARE-type resources.)
3811553Srgrimes	 */
3821553Srgrimes	if (rv && want_activate) {
3831553Srgrimes		struct resource *whohas;
3841553Srgrimes		if (int_rman_activate_resource(rm, rv, &whohas)) {
38541895Sdes			int_rman_release_resource(rm, rv);
38670284Siedowse			rv = 0;
38770284Siedowse		}
3881553Srgrimes	}
3891553Srgrimes
39070284Siedowse	mtx_unlock(rm->rm_mtx);
39170284Siedowse	return (rv);
39270284Siedowse}
39370284Siedowse
39470284Siedowsestruct resource *
3951553Srgrimesrman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
3961553Srgrimes		      u_int flags, struct device *dev)
3971553Srgrimes{
3981553Srgrimes
399252602Spjd	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
40070284Siedowse	    dev));
40170284Siedowse}
4021553Srgrimes
/*
 * Internal worker for resource activation; the caller must hold
 * rm->rm_mtx.  Sets RF_ACTIVE on `r' and returns 0, unless `r' is an
 * RF_TIMESHARE resource with a sharing partner that is already active,
 * in which case EBUSY is returned and *whohas points at the active
 * partner.  (`rm' is not used here; it is accepted for symmetry with
 * the other int_rman_* helpers.)
 */
static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == 0) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	/* Scan all sharers; any active one blocks our activation. */
	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}
4371553Srgrimes
4381553Srgrimesint
43985640Sdillonrman_activate_resource(struct resource *r)
44089572Sdillon{
4411553Srgrimes	int rv;
4421553Srgrimes	struct resource *whohas;
4431553Srgrimes	struct rman *rm;
4441553Srgrimes
4451553Srgrimes	rm = r->r_rm;
446252605Spjd	mtx_lock(rm->rm_mtx);
4471553Srgrimes	rv = int_rman_activate_resource(rm, r, &whohas);
4481553Srgrimes	mtx_unlock(rm->rm_mtx);
44917829Spst	return rv;
450252603Spjd}
45117829Spst
/*
 * Sleep until the timeshared resource `r' can be activated.  `pri' and
 * `timo' are passed through to msleep().
 *
 * Locking is asymmetric: on any non-EBUSY result from activation this
 * returns with rm->rm_mtx still held (see the comment below); if
 * msleep() itself fails, the mutex is dropped before returning its
 * error.
 */
int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int	rv;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		/* EBUSY implies a sharing partner exists. */
		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/* Ask the current holder to wake us when it deactivates. */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
476252603Spjd
477252603Spjdstatic int
478252603Spjdint_rman_deactivate_resource(struct resource *r)
479252603Spjd{
480252603Spjd
481252603Spjd	r->r_flags &= ~RF_ACTIVE;
4821553Srgrimes	if (r->r_flags & RF_WANTED) {
483252603Spjd		r->r_flags &= ~RF_WANTED;
484252603Spjd		wakeup(r->r_sharehead);
485252603Spjd	}
486252603Spjd	return 0;
487252603Spjd}
488252603Spjd
489252603Spjdint
490252603Spjdrman_deactivate_resource(struct resource *r)
491252603Spjd{
492252603Spjd	struct	rman *rm;
493252603Spjd
494252603Spjd	rm = r->r_rm;
495252603Spjd	mtx_lock(rm->rm_mtx);
496252603Spjd	int_rman_deactivate_resource(r);
497252603Spjd	mtx_unlock(rm->rm_mtx);
498252603Spjd	return 0;
499252603Spjd}
500252603Spjd
/*
 * Internal worker to release `r' back into `rm'; the caller must hold
 * rm->rm_mtx.  Deactivates the resource if needed, detaches it from
 * its sharing list if it has one, and otherwise coalesces the freed
 * range with adjacent unallocated regions.  Always returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* Hand our slot on the main list to sharer `s'. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
5931553Srgrimes
594252602Spjdint
5951553Srgrimesrman_release_resource(struct resource *r)
5961553Srgrimes{
5971553Srgrimes	int	rv;
598252602Spjd	struct	rman *rm = r->r_rm;
59910087Sjkh
60010087Sjkh	mtx_lock(rm->rm_mtx);
60110087Sjkh	rv = int_rman_release_resource(rm, r);
60210087Sjkh	mtx_unlock(rm->rm_mtx);
60310087Sjkh	return (rv);
60410087Sjkh}
60510087Sjkh
60610087Sjkhuint32_t
60710087Sjkhrman_make_alignment_flags(uint32_t size)
60810087Sjkh{
60910087Sjkh	int	i;
61099825Salfred
611252602Spjd	/*
61210087Sjkh	 * Find the hightest bit set, and add one if more than one bit
613252602Spjd	 * set.  We're effectively computing the ceil(log2(size)) here.
614252602Spjd	 */
61510087Sjkh	for (i = 31; i > 0; i--)
61610087Sjkh		if ((1 << i) & size)
617252602Spjd			break;
618252602Spjd	if (~(1 << i) & size)
61910087Sjkh		i++;
620252602Spjd
621252602Spjd	return(RF_ALIGNMENT_LOG2(i));
62210087Sjkh}
623252602Spjd
62410087Sjkhu_long
62510087Sjkhrman_get_start(struct resource *r)
6261553Srgrimes{
6271553Srgrimes	return (r->r_start);
6281553Srgrimes}
6291553Srgrimes
6301553Srgrimesu_long
6311553Srgrimesrman_get_end(struct resource *r)
632312033Sngie{
6331553Srgrimes	return (r->r_end);
6341553Srgrimes}
6351553Srgrimes
636312033Sngieu_long
6371553Srgrimesrman_get_size(struct resource *r)
6381553Srgrimes{
6391553Srgrimes	return (r->r_end - r->r_start + 1);
6401553Srgrimes}
6411553Srgrimes
6421553Srgrimesu_int
6431553Srgrimesrman_get_flags(struct resource *r)
6441553Srgrimes{
6451553Srgrimes	return (r->r_flags);
6461553Srgrimes}
6471553Srgrimes
/* Record the virtual address associated with `r'. */
void
rman_set_virtual(struct resource *r, void *v)
{
	r->r_virtual = v;
}
653252602Spjd
654252602Spjdvoid *
6551553Srgrimesrman_get_virtual(struct resource *r)
6561553Srgrimes{
6571553Srgrimes	return (r->r_virtual);
6581553Srgrimes}
6591553Srgrimes
/* Record the bus space tag associated with `r'. */
void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}
665252602Spjd
6661553Srgrimesbus_space_tag_t
6671553Srgrimesrman_get_bustag(struct resource *r)
6681553Srgrimes{
669252602Spjd	return (r->r_bustag);
6701553Srgrimes}
671252602Spjd
/* Record the bus space handle associated with `r'. */
void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}
6771553Srgrimes
6781553Srgrimesbus_space_handle_t
679252602Spjdrman_get_bushandle(struct resource *r)
6801553Srgrimes{
6811553Srgrimes	return (r->r_bushandle);
6821553Srgrimes}
6831553Srgrimes
/* Record the resource ID associated with `r'. */
void
rman_set_rid(struct resource *r, int rid)
{
	r->r_rid = rid;
}
68910087Sjkh
/* Overwrite the starting index of `r'.  No list re-sorting is done. */
void
rman_set_start(struct resource *r, u_long start)
{
	r->r_start = start;
}
695252602Spjd
/* Overwrite the ending index of `r'.  No list re-sorting is done. */
void
rman_set_end(struct resource *r, u_long end)
{
	r->r_end = end;
}
70110087Sjkh
70210087Sjkhint
70310087Sjkhrman_get_rid(struct resource *r)
70410087Sjkh{
70510087Sjkh	return (r->r_rid);
70610087Sjkh}
707252602Spjd
70810087Sjkhstruct device *
70910087Sjkhrman_get_device(struct resource *r)
71010087Sjkh{
71110087Sjkh	return (r->r_dev);
7121553Srgrimes}
7131553Srgrimes