subr_rman.c revision 45106
/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: subr_rman.c,v 1.4 1999/01/02 11:34:55 bde Exp $
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
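
/*
 * Illustrative sketch: roughly how a bus driver might create an rman for
 * its I/O port space and hand the entire range to it before making any
 * reservations.  The structure name, description string, and port range
 * here are hypothetical examples, not values used elsewhere in the tree.
 *
 *	static struct rman example_io_rman;
 *
 *	example_io_rman.rm_type = RMAN_ARRAY;
 *	example_io_rman.rm_descr = "Example bus I/O ports";
 *	if (rman_init(&example_io_rman) != 0
 *	    || rman_manage_region(&example_io_rman, 0x0ul, 0xfffful) != 0)
 *		panic("example bus: cannot set up I/O port rman");
 */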

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/bus.h>		/* XXX debugging */

MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
#ifndef NULL_SIMPLELOCKS
static	struct simplelock rman_lock; /* mutex to protect rman_head */
#endif
static	int int_rman_activate_resource(struct rman *rm, struct resource *r,
				       struct resource **whohas);
static	int int_rman_release_resource(struct rman *rm, struct resource *r);

#define	CIRCLEQ_TERMCOND(var, head)	(var == (void *)&(head))

int
rman_init(struct rman *rm)
{
	static int once;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		simple_lock_init(&rman_lock);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	CIRCLEQ_INIT(&rm->rm_list);
	rm->rm_slock = malloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
	if (rm->rm_slock == 0)
		return ENOMEM;
	simple_lock_init(rm->rm_slock);

	simple_lock(&rman_lock);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	simple_unlock(&rman_lock);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
	if (r == 0)
		return ENOMEM;
	r->r_sharehead = 0;
	r->r_start = start;
	r->r_end = end;
	r->r_flags = 0;
	r->r_dev = 0;
	r->r_rm = rm;

	simple_lock(rm->rm_slock);
	for (s = rm->rm_list.cqh_first;
	     !CIRCLEQ_TERMCOND(s, rm->rm_list) && s->r_end < r->r_start;
	     s = s->r_link.cqe_next)
		;

	if (CIRCLEQ_TERMCOND(s, rm->rm_list)) {
		CIRCLEQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, r, r_link);
	}

	simple_unlock(rm->rm_slock);
	return 0;
}

int
rman_fini(struct rman *rm)
{
	struct resource *r;

	simple_lock(rm->rm_slock);
	for (r = rm->rm_list.cqh_first;	!CIRCLEQ_TERMCOND(r, rm->rm_list);
	     r = r->r_link.cqe_next) {
		if (r->r_flags & RF_ALLOCATED) {
			simple_unlock(rm->rm_slock);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	for (r = rm->rm_list.cqh_first;	!CIRCLEQ_TERMCOND(r, rm->rm_list);
	     r = rm->rm_list.cqh_first) {
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	simple_unlock(rm->rm_slock);
	simple_lock(&rman_lock);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	simple_unlock(&rman_lock);
	free(rm->rm_slock, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{
	u_int	want_activate;
	struct	resource *r, *s, *rv;
	u_long	rstart, rend;

	rv = 0;

#ifdef RMAN_DEBUG
	printf("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s%d\n", rm->rm_descr, start, end,
	       count, flags, device_get_name(dev), device_get_unit(dev));
#endif /* RMAN_DEBUG */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	simple_lock(rm->rm_slock);

	for (r = rm->rm_list.cqh_first;
	     !CIRCLEQ_TERMCOND(r, rm->rm_list) && r->r_end < start;
	     r = r->r_link.cqe_next)
		;

	if (CIRCLEQ_TERMCOND(r, rm->rm_list)) {
#ifdef RMAN_DEBUG
		printf("could not find a region\n");
#endif /* RMAN_DEBUG */
		goto out;
	}

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
	     s = s->r_link.cqe_next) {
#ifdef RMAN_DEBUG
		printf("considering [%#lx, %#lx]\n", s->r_start, s->r_end);
#endif /* RMAN_DEBUG */
		if (s->r_start > end) {
#ifdef RMAN_DEBUG
			printf("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end);
#endif /* RMAN_DEBUG */
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
#ifdef RMAN_DEBUG
			printf("region is allocated\n");
#endif /* RMAN_DEBUG */
			continue;
		}
		rstart = max(s->r_start, start);
		rend = min(s->r_end, max(start + count, end));
#ifdef RMAN_DEBUG
		printf("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count);
#endif /* RMAN_DEBUG */

		if ((rend - rstart + 1) >= count) {
#ifdef RMAN_DEBUG
			printf("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1));
#endif /* RMAN_DEBUG */
			if ((s->r_end - s->r_start + 1) == count) {
#ifdef RMAN_DEBUG
				printf("candidate region is entire chunk\n");
#endif /* RMAN_DEBUG */
				rv = s;
				rv->r_flags |= RF_ALLOCATED;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = malloc(sizeof *r, M_RMAN, M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;
			rv->r_sharehead = 0;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
#ifdef RMAN_DEBUG
				printf("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end);
#endif /* RMAN_DEBUG */
				/*
				 * We are allocating in the middle.
				 */
				r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
				if (r == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_dev = 0;
				r->r_rm = rm;
				r->r_sharehead = 0;
				s->r_end = rv->r_start - 1;
				CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				CIRCLEQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
#ifdef RMAN_DEBUG
				printf("allocating from the beginning\n");
#endif /* RMAN_DEBUG */
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, rv,
						      r_link);
			} else {
#ifdef RMAN_DEBUG
				printf("allocating at the end\n");
#endif /* RMAN_DEBUG */
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
#ifdef RMAN_DEBUG
	printf("no unshared regions found\n");
#endif /* RMAN_DEBUG */
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
	     s = s->r_link.cqe_next) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = max(s->r_start, start);
		rend = min(s->r_end, max(start + count, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count) {
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == 0) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
							M_RMAN, M_NOWAIT);
				if (s->r_sharehead == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = 0;
		}
	}

	simple_unlock(rm->rm_slock);
	return (rv);
}
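
/*
 * Illustrative sketch: a bus's resource-allocation method might call
 * rman_reserve_resource() along these lines.  The rman, the port range,
 * and the `child' device pointer are hypothetical; a real caller would
 * derive them from its own configuration.
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&example_io_rman, 0x3f8, 0x3ff, 8,
 *				    RF_ACTIVE, child);
 *	if (res == 0)
 *		return 0;	(no acceptable region was found)
 *	(... the device uses the range; later ...)
 *	rman_release_resource(res);
 */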

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == 0) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = r->r_sharehead->lh_first; s && ok;
	     s = s->r_sharelink.le_next) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	simple_lock(rm->rm_slock);
	rv = int_rman_activate_resource(rm, r, &whohas);
	simple_unlock(rm->rm_slock);
	return rv;
}

int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int	rv, s;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	for (;;) {
		simple_lock(rm->rm_slock);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * splhigh hopefully will prevent a race between
		 * simple_unlock and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it.
		 */
		s = splhigh();
		whohas->r_flags |= RF_WANTED;
		simple_unlock(rm->rm_slock);
		rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
		if (rv) {
			splx(s);
			return rv;
		}
		simple_lock(rm->rm_slock);
		splx(s);
	}
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->r_rm;
	simple_lock(rm->rm_slock);
	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	simple_unlock(rm->rm_slock);
	return 0;
}
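
/*
 * Illustrative sketch: the intended handshake for RF_TIMESHARE resources.
 * A client which finds the range active elsewhere can sleep in
 * rman_await_resource(); the current holder later calls
 * rman_deactivate_resource(), which clears RF_ACTIVE and wakes any
 * sleepers on the share list.  The priority and timeout values below
 * are hypothetical.
 *
 *	(in the client that wants the range:)
 *	error = rman_await_resource(r, PZERO, hz);
 *
 *	(in the client that currently holds it:)
 *	rman_deactivate_resource(r);
 */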

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		return EBUSY;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = r->r_sharehead->lh_first;
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			CIRCLEQ_INSERT_BEFORE(&rm->rm_list, r, s, r_link);
			CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (s->r_sharelink.le_next == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
	 */
	s = r->r_link.cqe_prev;
	t = r->r_link.cqe_next;

	if (s != (void *)&rm->rm_list && (s->r_flags & RF_ALLOCATED) == 0
	    && t != (void *)&rm->rm_list && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
		CIRCLEQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != (void *)&rm->rm_list
		   && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != (void *)&rm->rm_list
		   && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *r)
{
	int	rv;
	struct	rman *rm = r->r_rm;

	simple_lock(rm->rm_slock);
	rv = int_rman_release_resource(rm, r);
	simple_unlock(rm->rm_slock);
	return (rv);
}