/* subr_rman.c — FreeBSD revision 69781 */
140711Swollman/*
240711Swollman * Copyright 1998 Massachusetts Institute of Technology
340711Swollman *
440711Swollman * Permission to use, copy, modify, and distribute this software and
540711Swollman * its documentation for any purpose and without fee is hereby
640711Swollman * granted, provided that both the above copyright notice and this
740711Swollman * permission notice appear in all copies, that both the above
840711Swollman * copyright notice and this permission notice appear in all
940711Swollman * supporting documentation, and that the name of M.I.T. not be used
1040711Swollman * in advertising or publicity pertaining to distribution of the
1140711Swollman * software without specific, written prior permission.  M.I.T. makes
1240711Swollman * no representations about the suitability of this software for any
1340711Swollman * purpose.  It is provided "as is" without express or implied
1440711Swollman * warranty.
1540711Swollman *
1640711Swollman * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
1740711Swollman * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
1840711Swollman * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
1940711Swollman * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
2040711Swollman * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2140711Swollman * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2240711Swollman * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
2340711Swollman * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2440711Swollman * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
2540711Swollman * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
2640711Swollman * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2740711Swollman * SUCH DAMAGE.
2840711Swollman *
2950477Speter * $FreeBSD: head/sys/kern/subr_rman.c 69781 2000-12-08 21:51:06Z dwmalone $
3040711Swollman */
3140711Swollman
3240711Swollman/*
3340711Swollman * The kernel resource manager.  This code is responsible for keeping track
3440711Swollman * of hardware resources which are apportioned out to various drivers.
3540711Swollman * It does not actually assign those resources, and it is not expected
3640711Swollman * that end-device drivers will call into this code directly.  Rather,
3740711Swollman * the code which implements the buses that those devices are attached to,
3840711Swollman * and the code which manages CPU resources, will call this code, and the
3940711Swollman * end-device drivers will make upcalls to that code to actually perform
4040711Swollman * the allocation.
4140711Swollman *
4240711Swollman * There are two sorts of resources managed by this code.  The first is
4340711Swollman * the more familiar array (RMAN_ARRAY) type; resources in this class
4440711Swollman * consist of a sequence of individually-allocatable objects which have
4540711Swollman * been numbered in some well-defined order.  Most of the resources
4640711Swollman * are of this type, as it is the most familiar.  The second type is
4740711Swollman * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
4840711Swollman * resources in which each instance is indistinguishable from every
4940711Swollman * other instance).  The principal anticipated application of gauges
5040711Swollman * is in the context of power consumption, where a bus may have a specific
5140711Swollman * power budget which all attached devices share.  RMAN_GAUGE is not
5240711Swollman * implemented yet.
5340711Swollman *
5440711Swollman * For array resources, we make one simplifying assumption: two clients
5540711Swollman * sharing the same resource must use the same range of indices.  That
5640711Swollman * is to say, sharing of overlapping-but-not-identical regions is not
5740711Swollman * permitted.
5840711Swollman */
5940711Swollman
6040711Swollman#include <sys/param.h>
6140711Swollman#include <sys/systm.h>
6241304Sbde#include <sys/kernel.h>
6340711Swollman#include <sys/lock.h>
6440711Swollman#include <sys/malloc.h>
6545720Speter#include <sys/bus.h>		/* XXX debugging */
6645720Speter#include <machine/bus.h>
6740711Swollman#include <sys/rman.h>
6840711Swollman
/*
 * Debug printout macro.  With RMAN_DEBUG defined, DPRINTF((fmt, ...))
 * expands to a printf; otherwise it compiles away entirely.  The double
 * parentheses at the call sites let one macro argument carry a whole
 * printf argument list.  Note: plain juxtaposition (`printf params'),
 * not token pasting -- `printf##params' would paste `printf' with `(',
 * which is not a valid preprocessing token (undefined behavior).
 */
#ifdef RMAN_DEBUG
#define DPRINTF(params) printf params
#else
#define DPRINTF(params)
#endif
7459910Spaul
7545569Seivindstatic MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
7640711Swollman
7740711Swollmanstruct	rman_head rman_head;
7842248Sbde#ifndef NULL_SIMPLELOCKS
7940711Swollmanstatic	struct simplelock rman_lock; /* mutex to protect rman_head */
8042248Sbde#endif
8140711Swollmanstatic	int int_rman_activate_resource(struct rman *rm, struct resource *r,
8240711Swollman				       struct resource **whohas);
8345720Speterstatic	int int_rman_deactivate_resource(struct resource *r);
8440711Swollmanstatic	int int_rman_release_resource(struct rman *rm, struct resource *r);
8540711Swollman
8640711Swollmanint
8740711Swollmanrman_init(struct rman *rm)
8840711Swollman{
8940711Swollman	static int once;
9040711Swollman
9140711Swollman	if (once == 0) {
9240711Swollman		once = 1;
9340711Swollman		TAILQ_INIT(&rman_head);
9440711Swollman		simple_lock_init(&rman_lock);
9540711Swollman	}
9640711Swollman
9740711Swollman	if (rm->rm_type == RMAN_UNINIT)
9840711Swollman		panic("rman_init");
9940711Swollman	if (rm->rm_type == RMAN_GAUGE)
10040711Swollman		panic("implement RMAN_GAUGE");
10140711Swollman
10268727Smckusick	TAILQ_INIT(&rm->rm_list);
10340711Swollman	rm->rm_slock = malloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
10440711Swollman	if (rm->rm_slock == 0)
10540711Swollman		return ENOMEM;
10640711Swollman	simple_lock_init(rm->rm_slock);
10740711Swollman
10840711Swollman	simple_lock(&rman_lock);
10940711Swollman	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
11040711Swollman	simple_unlock(&rman_lock);
11140711Swollman	return 0;
11240711Swollman}
11340711Swollman
11440711Swollman/*
11540711Swollman * NB: this interface is not robust against programming errors which
11640711Swollman * add multiple copies of the same region.
11740711Swollman */
11840711Swollmanint
11940711Swollmanrman_manage_region(struct rman *rm, u_long start, u_long end)
12040711Swollman{
12140711Swollman	struct resource *r, *s;
12240711Swollman
12369781Sdwmalone	r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
12440711Swollman	if (r == 0)
12540711Swollman		return ENOMEM;
12640711Swollman	r->r_sharehead = 0;
12740711Swollman	r->r_start = start;
12840711Swollman	r->r_end = end;
12940711Swollman	r->r_flags = 0;
13040711Swollman	r->r_dev = 0;
13140711Swollman	r->r_rm = rm;
13240711Swollman
13340711Swollman	simple_lock(rm->rm_slock);
13468727Smckusick	for (s = TAILQ_FIRST(&rm->rm_list);
13568727Smckusick	     s && s->r_end < r->r_start;
13668727Smckusick	     s = TAILQ_NEXT(s, r_link))
13740711Swollman		;
13840711Swollman
13968727Smckusick	if (s == NULL) {
14068727Smckusick		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
14140711Swollman	} else {
14268727Smckusick		TAILQ_INSERT_BEFORE(s, r, r_link);
14340711Swollman	}
14440711Swollman
14540711Swollman	simple_unlock(rm->rm_slock);
14640711Swollman	return 0;
14740711Swollman}
14840711Swollman
14940711Swollmanint
15040711Swollmanrman_fini(struct rman *rm)
15140711Swollman{
15240711Swollman	struct resource *r;
15340711Swollman
15440711Swollman	simple_lock(rm->rm_slock);
15568727Smckusick	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
15645720Speter		if (r->r_flags & RF_ALLOCATED) {
15745720Speter			simple_unlock(rm->rm_slock);
15840711Swollman			return EBUSY;
15945720Speter		}
16040711Swollman	}
16140711Swollman
16240711Swollman	/*
16340711Swollman	 * There really should only be one of these if we are in this
16440711Swollman	 * state and the code is working properly, but it can't hurt.
16540711Swollman	 */
16668727Smckusick	while (!TAILQ_EMPTY(&rm->rm_list)) {
16768727Smckusick		r = TAILQ_FIRST(&rm->rm_list);
16868727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
16940711Swollman		free(r, M_RMAN);
17040711Swollman	}
17140711Swollman	simple_unlock(rm->rm_slock);
17240711Swollman	simple_lock(&rman_lock);
17340711Swollman	TAILQ_REMOVE(&rman_head, rm, rm_link);
17440711Swollman	simple_unlock(&rman_lock);
17540711Swollman	free(rm->rm_slock, M_RMAN);
17640711Swollman
17740711Swollman	return 0;
17840711Swollman}
17940711Swollman
18040711Swollmanstruct resource *
18140711Swollmanrman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
18240711Swollman		      u_int flags, struct device *dev)
18340711Swollman{
18440711Swollman	u_int	want_activate;
18540711Swollman	struct	resource *r, *s, *rv;
18640711Swollman	u_long	rstart, rend;
18740711Swollman
18840711Swollman	rv = 0;
18940711Swollman
19059910Spaul	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
19140711Swollman	       "%#lx, flags %u, device %s%d\n", rm->rm_descr, start, end,
19259910Spaul	       count, flags, device_get_name(dev), device_get_unit(dev)));
19340711Swollman	want_activate = (flags & RF_ACTIVE);
19440711Swollman	flags &= ~RF_ACTIVE;
19540711Swollman
19640711Swollman	simple_lock(rm->rm_slock);
19740711Swollman
19868727Smckusick	for (r = TAILQ_FIRST(&rm->rm_list);
19968727Smckusick	     r && r->r_end < start;
20068727Smckusick	     r = TAILQ_NEXT(r, r_link))
20140711Swollman		;
20240711Swollman
20368727Smckusick	if (r == NULL) {
20459910Spaul		DPRINTF(("could not find a region\n"));
20540711Swollman		goto out;
20640711Swollman	}
20740711Swollman
20840711Swollman	/*
20940711Swollman	 * First try to find an acceptable totally-unshared region.
21040711Swollman	 */
21168727Smckusick	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
21259910Spaul		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
21340711Swollman		if (s->r_start > end) {
21459910Spaul			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end));
21540711Swollman			break;
21640711Swollman		}
21740711Swollman		if (s->r_flags & RF_ALLOCATED) {
21859910Spaul			DPRINTF(("region is allocated\n"));
21940711Swollman			continue;
22040711Swollman		}
22140711Swollman		rstart = max(s->r_start, start);
22267261Simp		rstart = (rstart + ((1ul << RF_ALIGNMENT(flags))) - 1) &
22367261Simp		    ~((1ul << RF_ALIGNMENT(flags)) - 1);
22467261Simp		rend = min(s->r_end, max(rstart + count, end));
22559910Spaul		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
22659910Spaul		       rstart, rend, (rend - rstart + 1), count));
22740711Swollman
22840711Swollman		if ((rend - rstart + 1) >= count) {
22959910Spaul			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
23059910Spaul			       rend, rstart, (rend - rstart + 1)));
23140711Swollman			if ((s->r_end - s->r_start + 1) == count) {
23259910Spaul				DPRINTF(("candidate region is entire chunk\n"));
23340711Swollman				rv = s;
23448235Sdfr				rv->r_flags |= RF_ALLOCATED | flags;
23540711Swollman				rv->r_dev = dev;
23640711Swollman				goto out;
23740711Swollman			}
23840711Swollman
23940711Swollman			/*
24040711Swollman			 * If s->r_start < rstart and
24140711Swollman			 *    s->r_end > rstart + count - 1, then
24240711Swollman			 * we need to split the region into three pieces
24340711Swollman			 * (the middle one will get returned to the user).
24440711Swollman			 * Otherwise, we are allocating at either the
24540711Swollman			 * beginning or the end of s, so we only need to
24640711Swollman			 * split it in two.  The first case requires
24740711Swollman			 * two new allocations; the second requires but one.
24840711Swollman			 */
24969781Sdwmalone			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
25040711Swollman			if (rv == 0)
25140711Swollman				goto out;
25240711Swollman			rv->r_start = rstart;
25340711Swollman			rv->r_end = rstart + count - 1;
25440711Swollman			rv->r_flags = flags | RF_ALLOCATED;
25540711Swollman			rv->r_dev = dev;
25640711Swollman			rv->r_sharehead = 0;
25745720Speter			rv->r_rm = rm;
25840711Swollman
25940711Swollman			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
26059910Spaul				DPRINTF(("splitting region in three parts: "
26140711Swollman				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
26240711Swollman				       s->r_start, rv->r_start - 1,
26340711Swollman				       rv->r_start, rv->r_end,
26459910Spaul				       rv->r_end + 1, s->r_end));
26540711Swollman				/*
26640711Swollman				 * We are allocating in the middle.
26740711Swollman				 */
26869781Sdwmalone				r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
26940711Swollman				if (r == 0) {
27040711Swollman					free(rv, M_RMAN);
27140711Swollman					rv = 0;
27240711Swollman					goto out;
27340711Swollman				}
27440711Swollman				r->r_start = rv->r_end + 1;
27540711Swollman				r->r_end = s->r_end;
27640711Swollman				r->r_flags = s->r_flags;
27740711Swollman				r->r_dev = 0;
27840711Swollman				r->r_sharehead = 0;
27945720Speter				r->r_rm = rm;
28040711Swollman				s->r_end = rv->r_start - 1;
28168727Smckusick				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
28240711Swollman						     r_link);
28368727Smckusick				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
28440711Swollman						     r_link);
28540711Swollman			} else if (s->r_start == rv->r_start) {
28659910Spaul				DPRINTF(("allocating from the beginning\n"));
28740711Swollman				/*
28840711Swollman				 * We are allocating at the beginning.
28940711Swollman				 */
29040711Swollman				s->r_start = rv->r_end + 1;
29168727Smckusick				TAILQ_INSERT_BEFORE(s, rv, r_link);
29240711Swollman			} else {
29359910Spaul				DPRINTF(("allocating at the end\n"));
29440711Swollman				/*
29540711Swollman				 * We are allocating at the end.
29640711Swollman				 */
29740711Swollman				s->r_end = rv->r_start - 1;
29868727Smckusick				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
29940711Swollman						     r_link);
30040711Swollman			}
30140711Swollman			goto out;
30240711Swollman		}
30340711Swollman	}
30440711Swollman
30540711Swollman	/*
30640711Swollman	 * Now find an acceptable shared region, if the client's requirements
30740711Swollman	 * allow sharing.  By our implementation restriction, a candidate
30840711Swollman	 * region must match exactly by both size and sharing type in order
30940711Swollman	 * to be considered compatible with the client's request.  (The
31040711Swollman	 * former restriction could probably be lifted without too much
31140711Swollman	 * additional work, but this does not seem warranted.)
31240711Swollman	 */
31359910Spaul	DPRINTF(("no unshared regions found\n"));
31440711Swollman	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
31540711Swollman		goto out;
31640711Swollman
31768727Smckusick	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
31840711Swollman		if (s->r_start > end)
31940711Swollman			break;
32040711Swollman		if ((s->r_flags & flags) != flags)
32140711Swollman			continue;
32240711Swollman		rstart = max(s->r_start, start);
32340711Swollman		rend = min(s->r_end, max(start + count, end));
32440711Swollman		if (s->r_start >= start && s->r_end <= end
32540711Swollman		    && (s->r_end - s->r_start + 1) == count) {
32669781Sdwmalone			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
32740711Swollman			if (rv == 0)
32840711Swollman				goto out;
32940711Swollman			rv->r_start = s->r_start;
33040711Swollman			rv->r_end = s->r_end;
33140711Swollman			rv->r_flags = s->r_flags &
33240711Swollman				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
33340711Swollman			rv->r_dev = dev;
33440711Swollman			rv->r_rm = rm;
33540711Swollman			if (s->r_sharehead == 0) {
33640711Swollman				s->r_sharehead = malloc(sizeof *s->r_sharehead,
33769781Sdwmalone						M_RMAN, M_NOWAIT | M_ZERO);
33840711Swollman				if (s->r_sharehead == 0) {
33940711Swollman					free(rv, M_RMAN);
34040711Swollman					rv = 0;
34140711Swollman					goto out;
34240711Swollman				}
34340711Swollman				LIST_INIT(s->r_sharehead);
34440711Swollman				LIST_INSERT_HEAD(s->r_sharehead, s,
34540711Swollman						 r_sharelink);
34645106Sdfr				s->r_flags |= RF_FIRSTSHARE;
34740711Swollman			}
34840711Swollman			rv->r_sharehead = s->r_sharehead;
34940711Swollman			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
35040711Swollman			goto out;
35140711Swollman		}
35240711Swollman	}
35340711Swollman
35440711Swollman	/*
35540711Swollman	 * We couldn't find anything.
35640711Swollman	 */
35740711Swollmanout:
35840711Swollman	/*
35940711Swollman	 * If the user specified RF_ACTIVE in the initial flags,
36040711Swollman	 * which is reflected in `want_activate', we attempt to atomically
36140711Swollman	 * activate the resource.  If this fails, we release the resource
36240711Swollman	 * and indicate overall failure.  (This behavior probably doesn't
36340711Swollman	 * make sense for RF_TIMESHARE-type resources.)
36440711Swollman	 */
36540711Swollman	if (rv && want_activate) {
36640711Swollman		struct resource *whohas;
36740711Swollman		if (int_rman_activate_resource(rm, rv, &whohas)) {
36840711Swollman			int_rman_release_resource(rm, rv);
36940711Swollman			rv = 0;
37040711Swollman		}
37140711Swollman	}
37240711Swollman
37340711Swollman	simple_unlock(rm->rm_slock);
37440711Swollman	return (rv);
37540711Swollman}
37640711Swollman
37740711Swollmanstatic int
37840711Swollmanint_rman_activate_resource(struct rman *rm, struct resource *r,
37940711Swollman			   struct resource **whohas)
38040711Swollman{
38140711Swollman	struct resource *s;
38240711Swollman	int ok;
38340711Swollman
38440711Swollman	/*
38540711Swollman	 * If we are not timesharing, then there is nothing much to do.
38640711Swollman	 * If we already have the resource, then there is nothing at all to do.
38740711Swollman	 * If we are not on a sharing list with anybody else, then there is
38840711Swollman	 * little to do.
38940711Swollman	 */
39040711Swollman	if ((r->r_flags & RF_TIMESHARE) == 0
39140711Swollman	    || (r->r_flags & RF_ACTIVE) != 0
39240711Swollman	    || r->r_sharehead == 0) {
39340711Swollman		r->r_flags |= RF_ACTIVE;
39440711Swollman		return 0;
39540711Swollman	}
39640711Swollman
39740711Swollman	ok = 1;
39853225Sphk	for (s = LIST_FIRST(r->r_sharehead); s && ok;
39953225Sphk	     s = LIST_NEXT(s, r_sharelink)) {
40040711Swollman		if ((s->r_flags & RF_ACTIVE) != 0) {
40140711Swollman			ok = 0;
40240711Swollman			*whohas = s;
40340711Swollman		}
40440711Swollman	}
40540711Swollman	if (ok) {
40640711Swollman		r->r_flags |= RF_ACTIVE;
40740711Swollman		return 0;
40840711Swollman	}
40940711Swollman	return EBUSY;
41040711Swollman}
41140711Swollman
41240711Swollmanint
41340711Swollmanrman_activate_resource(struct resource *r)
41440711Swollman{
41540711Swollman	int rv;
41640711Swollman	struct resource *whohas;
41740711Swollman	struct rman *rm;
41840711Swollman
41940711Swollman	rm = r->r_rm;
42040711Swollman	simple_lock(rm->rm_slock);
42140711Swollman	rv = int_rman_activate_resource(rm, r, &whohas);
42240711Swollman	simple_unlock(rm->rm_slock);
42340711Swollman	return rv;
42440711Swollman}
42540711Swollman
/*
 * Activate `r', sleeping (at priority `pri', with timeout `timo' --
 * both passed straight to tsleep()) until the current holder releases
 * it.  On success this returns 0 WITH the manager's simplelock held;
 * on tsleep() error (e.g. EWOULDBLOCK on timeout) it returns with the
 * lock released.
 */
int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int	rv, s;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	for (;;) {
		simple_lock(rm->rm_slock);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with simplelock */

		/* EBUSY implies a sharer holds it, so a share list must
		 * exist. */
		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * splhigh hopefully will prevent a race between
		 * simple_unlock and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it.
		 */
		s = splhigh();
		whohas->r_flags |= RF_WANTED;
		simple_unlock(rm->rm_slock);
		/* Sleep on the share list head; the releaser wakes it in
		 * int_rman_deactivate_resource(). */
		rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
		if (rv) {
			splx(s);
			return rv;
		}
		/* Woken up: retake the lock and retry the activation. */
		simple_lock(rm->rm_slock);
		splx(s);
	}
}
46040711Swollman
46145720Speterstatic int
46245720Speterint_rman_deactivate_resource(struct resource *r)
46340711Swollman{
46440711Swollman	struct	rman *rm;
46540711Swollman
46640711Swollman	rm = r->r_rm;
46740711Swollman	r->r_flags &= ~RF_ACTIVE;
46840711Swollman	if (r->r_flags & RF_WANTED) {
46940711Swollman		r->r_flags &= ~RF_WANTED;
47040711Swollman		wakeup(r->r_sharehead);
47140711Swollman	}
47245720Speter	return 0;
47345720Speter}
47445720Speter
47545720Speterint
47645720Speterrman_deactivate_resource(struct resource *r)
47745720Speter{
47845720Speter	struct	rman *rm;
47945720Speter
48045720Speter	rm = r->r_rm;
48145720Speter	simple_lock(rm->rm_slock);
48245720Speter	int_rman_deactivate_resource(r);
48340711Swollman	simple_unlock(rm->rm_slock);
48440711Swollman	return 0;
48540711Swollman}
48640711Swollman
/*
 * Internal helper: hand `r' back to its manager.  A shared resource
 * simply drops off its share list (promoting a replacement onto the
 * main list if `r' was the RF_FIRSTSHARE representative); an unshared
 * one is marked free and coalesced with free neighbours in the
 * address-sorted region list.  Caller holds the manager's lock.
 * Always returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* `r' represented the share on the main region
			 * list; substitute the new head `s' in its
			 * place before unlinking `r'. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);

	if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
	    && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
57240711Swollman
57340711Swollmanint
57440711Swollmanrman_release_resource(struct resource *r)
57540711Swollman{
57640711Swollman	int	rv;
57740711Swollman	struct	rman *rm = r->r_rm;
57840711Swollman
57940711Swollman	simple_lock(rm->rm_slock);
58040711Swollman	rv = int_rman_release_resource(rm, r);
58140711Swollman	simple_unlock(rm->rm_slock);
58240711Swollman	return (rv);
58340711Swollman}
58467261Simp
/*
 * Translate an alignment/size request into resource flag bits.
 * Computes ceil(log2(size)) -- the index of the highest set bit, plus
 * one if any lower bit is also set -- and encodes it with
 * RF_ALIGNMENT_LOG2().  size == 0 yields an alignment of 0.
 */
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the hightest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 * Start at bit 31: shifting a 32-bit value by 32 is undefined
	 * behavior (the old `i = 32' start made size=1 appear to match
	 * bit 32 on machines that reduce shifts mod 32), and bit 31 is
	 * the highest that can be set anyway.  Use an unsigned constant
	 * so 1 << 31 does not overflow a signed int.
	 */
	for (i = 31; i > 0; i--)
		if ((1U << i) & size)
			break;
	if (~(1U << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}
602