/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/subr_rman.c 71576 2001-01-24 12:35:55Z jasone $
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
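
/*
 * For example (device ranges here are illustrative only): two drivers
 * may each be granted a shared reservation of [0x3f8, 0x3ff], but once
 * that reservation exists, a request for the overlapping range
 * [0x3f0, 0x3ff] cannot be satisfied by sharing it; only a request for
 * the identical range with compatible sharing flags will match.
 */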

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>

#ifdef RMAN_DEBUG
/* Callers double-parenthesize: DPRINTF(("fmt", args)) -> printf("fmt", args) */
#define DPRINTF(params) printf params
#else
#define DPRINTF(params)
#endif

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource *r,
				       struct resource **whohas);
static	int int_rman_deactivate_resource(struct resource *r);
static	int int_rman_release_resource(struct rman *rm, struct resource *r);

int
rman_init(struct rman *rm)
{
	static int once;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT);
	if (rm->rm_mtx == 0)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", MTX_DEF);

	mtx_enter(&rman_mtx, MTX_DEF);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_exit(&rman_mtx, MTX_DEF);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
	if (r == 0)
		return ENOMEM;
	r->r_sharehead = 0;
	r->r_start = start;
	r->r_end = end;
	r->r_flags = 0;
	r->r_dev = 0;
	r->r_rm = rm;

	mtx_enter(rm->rm_mtx, MTX_DEF);
	/*
	 * Skip ahead to the first region which ends at or after our
	 * start, and insert before it, keeping the list sorted.
	 */
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		TAILQ_INSERT_BEFORE(s, r, r_link);
	}

	mtx_exit(rm->rm_mtx, MTX_DEF);
	return 0;
}
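
/*
 * Usage sketch (hypothetical driver code, not compiled here; the rman
 * variable and descriptor string are invented for illustration): a bus
 * driver creates one rman per resource space and populates it before
 * handing out any resources:
 *
 *	static struct rman hypo_io_rman;
 *
 *	hypo_io_rman.rm_type = RMAN_ARRAY;
 *	hypo_io_rman.rm_descr = "hypothetical bus I/O ports";
 *	if (rman_init(&hypo_io_rman) != 0 ||
 *	    rman_manage_region(&hypo_io_rman, 0x0, 0xffff) != 0)
 *		panic("cannot set up I/O port rman");
 *
 * rman_fini() below undoes this, provided nothing is still allocated.
 */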

int
rman_fini(struct rman *rm)
{
	struct resource *r;

	mtx_enter(rm->rm_mtx, MTX_DEF);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_exit(rm->rm_mtx, MTX_DEF);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_exit(rm->rm_mtx, MTX_DEF);
	mtx_enter(&rman_mtx, MTX_DEF);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_exit(&rman_mtx, MTX_DEF);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{
	u_int	want_activate;
	struct	resource *r, *s, *rv;
	u_long	rstart, rend;

	rv = 0;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s%d\n", rm->rm_descr, start, end,
	       count, flags, device_get_name(dev), device_get_unit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_enter(rm->rm_mtx, MTX_DEF);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start > end) {
			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = max(s->r_start, start);
		/* Round the candidate start up to the requested alignment. */
		rstart = (rstart + ((1ul << RF_ALIGNMENT(flags))) - 1) &
		    ~((1ul << RF_ALIGNMENT(flags)) - 1);
		rend = min(s->r_end, max(rstart + count, end));
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == 0)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_sharehead = 0;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
				if (r == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_dev = 0;
				r->r_sharehead = 0;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = max(s->r_start, start);
		rend = min(s->r_end, max(start + count, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count) {
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == 0)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == 0) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = 0;
		}
	}

	mtx_exit(rm->rm_mtx, MTX_DEF);
	return (rv);
}
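
/*
 * Caller's-eye sketch (hypothetical, not compiled here; the rman name
 * and error handling are invented for illustration): a bus's resource
 * allocation method might reserve a 32-byte, 32-byte-aligned port range
 * anywhere in [0x0, 0xffff] and activate it in one shot:
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&hypo_io_rman, 0x0, 0xffff, 0x20,
 *	    rman_make_alignment_flags(0x20) | RF_ACTIVE, dev);
 *	if (res == NULL)
 *		return (ENXIO);
 *
 * Since neither RF_SHAREABLE nor RF_TIMESHARE is passed, only a
 * totally-unshared region will satisfy the request.
 */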

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == 0) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	mtx_enter(rm->rm_mtx, MTX_DEF);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return rv;
}

int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int	rv, s;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	for (;;) {
		mtx_enter(rm->rm_mtx, MTX_DEF);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * splhigh hopefully will prevent a race between
		 * mtx_exit and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it.
		 */
		s = splhigh();
		whohas->r_flags |= RF_WANTED;
		mtx_exit(rm->rm_mtx, MTX_DEF);
		rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
		if (rv) {
			splx(s);
			return rv;
		}
		mtx_enter(rm->rm_mtx, MTX_DEF);
		splx(s);
	}
}
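
/*
 * Timesharing sketch (hypothetical caller, not compiled here; the
 * PZERO priority and one-second timeout are illustrative only): with
 * RF_TIMESHARE, several clients may hold the same region, but only one
 * may have it active at any instant.  A client that finds the region
 * busy can block until the active holder deactivates it:
 *
 *	error = rman_await_resource(res, PZERO, hz);
 *	if (error)
 *		return (error);
 *	(use the hardware, then:)
 *	rman_deactivate_resource(res);
 *
 * Note that on success rman_await_resource() returns with the rman
 * mutex held (see the comment above), which a real caller must take
 * into account.
 */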

static int
int_rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->r_rm;
	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->r_rm;
	mtx_enter(rm->rm_mtx, MTX_DEF);
	int_rman_deactivate_resource(r);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);

	if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
	    && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *r)
{
	int	rv;
	struct	rman *rm = r->r_rm;

	mtx_enter(rm->rm_mtx, MTX_DEF);
	rv = int_rman_release_resource(rm, r);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return (rv);
}
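
/*
 * Worked example of the coalescing above (values are illustrative):
 * suppose the region list holds [0x100, 0x1ff] (free), [0x200, 0x2ff]
 * (allocated), [0x300, 0x3ff] (free).  Releasing [0x200, 0x2ff] merges
 * all three entries into a single free [0x100, 0x3ff], restoring the
 * state the list had before the reservation split it.
 */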

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 * Bit 31 is the highest that can be set in a uint32_t, and
	 * shifting a 32-bit int by 32 is undefined, so start at 31.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
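
/*
 * Worked example (illustrative): size 0x1000 has only bit 12 set, so
 * the loop stops at i = 12 and no correction is applied;
 * RF_ALIGNMENT_LOG2(12) requests 4KB alignment.  Size 0x1001 also
 * stops at i = 12, but since lower bits remain set, i becomes 13 and
 * the request is rounded up to 8KB alignment: ceil(log2(size)).
 */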