subr_rman.c revision 40711
/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id$
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most resources are of this
 * type.  The second type is called a gauge (RMAN_GAUGE), and models
 * fungible resources (i.e., resources in which each instance is
 * indistinguishable from every other instance).  The principal
 * anticipated application of gauges is power consumption, where a bus
 * may have a specific power budget which all attached devices share.
 * RMAN_GAUGE is not implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */

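/*
 * Usage sketch (illustrative only; "foobus_irq_rman", "dev" and the
 * 0-15 interrupt range below are invented for this example).  A bus
 * driver typically creates an rman at attach time, tells it which
 * index ranges it controls, and later carves allocations out of it on
 * behalf of its children:
 *
 *	static struct rman foobus_irq_rman;
 *
 *	foobus_irq_rman.rm_type = RMAN_ARRAY;
 *	foobus_irq_rman.rm_descr = "foobus interrupt lines";
 *	if (rman_init(&foobus_irq_rman) != 0
 *	    || rman_manage_region(&foobus_irq_rman, 0, 15) != 0)
 *		panic("foobus: cannot set up IRQ rman");
 *
 * and then, when a child asks for (say) a shareable interrupt line 5:
 *
 *	struct resource *irq;
 *
 *	irq = rman_reserve_resource(&foobus_irq_rman, 5, 5, 1,
 *				    RF_SHAREABLE | RF_ACTIVE, dev);
 *	if (irq == 0)
 *		return (ENXIO);
 *
 * with a matching rman_release_resource(irq) call when the child goes
 * away.
 */
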
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/bus.h>		/* XXX debugging */

MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct simplelock rman_lock; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource *r,
				       struct resource **whohas);
static	int int_rman_release_resource(struct rman *rm, struct resource *r);

#define	CIRCLEQ_TERMCOND(var, head)	(var == (void *)&(head))

int
rman_init(struct rman *rm)
{
	static int once;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		simple_lock_init(&rman_lock);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	CIRCLEQ_INIT(&rm->rm_list);
	rm->rm_slock = malloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
	if (rm->rm_slock == 0)
		return ENOMEM;
	simple_lock_init(rm->rm_slock);

	simple_lock(&rman_lock);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	simple_unlock(&rman_lock);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
	if (r == 0)
		return ENOMEM;
	r->r_sharehead = 0;
	r->r_start = start;
	r->r_end = end;
	r->r_flags = 0;
	r->r_dev = 0;
	r->r_rm = rm;

	simple_lock(rm->rm_slock);
	for (s = rm->rm_list.cqh_first;
	     !CIRCLEQ_TERMCOND(s, rm->rm_list) && s->r_end < r->r_start;
	     s = s->r_link.cqe_next)
		;

	if (CIRCLEQ_TERMCOND(s, rm->rm_list)) {
		CIRCLEQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, r, r_link);
	}

	simple_unlock(rm->rm_slock);
	return 0;
}

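/*
 * Illustrative note ("foobus_mem_rman" and the address ranges are
 * invented): one rman may manage several disjoint ranges, and the
 * insertion loop in rman_manage_region() keeps rm_list sorted by
 * address, so the regions may be handed in either order:
 *
 *	rman_manage_region(&foobus_mem_rman, 0xd0000, 0xdffff);
 *	rman_manage_region(&foobus_mem_rman, 0xa0000, 0xbffff);
 */
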
int
rman_fini(struct rman *rm)
{
	struct resource *r;

	simple_lock(rm->rm_slock);
	for (r = rm->rm_list.cqh_first; !CIRCLEQ_TERMCOND(r, rm->rm_list);
	     r = r->r_link.cqe_next) {
		if (r->r_flags & RF_ALLOCATED) {
			simple_unlock(rm->rm_slock);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	for (r = rm->rm_list.cqh_first; !CIRCLEQ_TERMCOND(r, rm->rm_list);
	     r = rm->rm_list.cqh_first) {
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	simple_unlock(rm->rm_slock);
	simple_lock(&rman_lock);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	simple_unlock(&rman_lock);
	free(rm->rm_slock, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{
	u_int	want_activate;
	struct	resource *r, *s, *rv;
	u_long	rstart, rend;

	rv = 0;

#ifdef RMAN_DEBUG
	printf("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s%d\n", rm->rm_descr, start, end,
	       count, flags, device_get_name(dev), device_get_unit(dev));
#endif /* RMAN_DEBUG */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	simple_lock(rm->rm_slock);

	for (r = rm->rm_list.cqh_first;
	     !CIRCLEQ_TERMCOND(r, rm->rm_list) && r->r_end < start;
	     r = r->r_link.cqe_next)
		;

	if (CIRCLEQ_TERMCOND(r, rm->rm_list)) {
#ifdef RMAN_DEBUG
		printf("could not find a region\n");
#endif /* RMAN_DEBUG */
		goto out;
	}

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
	     s = s->r_link.cqe_next) {
#ifdef RMAN_DEBUG
		printf("considering [%#lx, %#lx]\n", s->r_start, s->r_end);
#endif /* RMAN_DEBUG */
		if (s->r_start > end) {
#ifdef RMAN_DEBUG
			printf("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end);
#endif /* RMAN_DEBUG */
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
#ifdef RMAN_DEBUG
			printf("region is allocated\n");
#endif /* RMAN_DEBUG */
			continue;
		}
		rstart = max(s->r_start, start);
		rend = min(s->r_end, max(start + count, end));
#ifdef RMAN_DEBUG
		printf("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count);
#endif /* RMAN_DEBUG */

		if ((rend - rstart + 1) >= count) {
#ifdef RMAN_DEBUG
			printf("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1));
#endif /* RMAN_DEBUG */
			if ((s->r_end - s->r_start + 1) == count) {
#ifdef RMAN_DEBUG
				printf("candidate region is entire chunk\n");
#endif /* RMAN_DEBUG */
				rv = s;
				rv->r_flags |= (flags | RF_ALLOCATED);
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
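			/*
			 * Worked example (numbers invented): if s covers
			 * [0x100, 0x1ff] and the client asked for 0x20
			 * items starting no lower than 0x140, rv becomes
			 * [0x140, 0x15f], s is trimmed to [0x100, 0x13f],
			 * and a new free region [0x160, 0x1ff] is inserted
			 * after rv.
			 */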
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_sharehead = 0;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
#ifdef RMAN_DEBUG
				printf("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end);
#endif /* RMAN_DEBUG */
				/*
				 * We are allocating in the middle.
				 */
				r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
				if (r == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_dev = 0;
				r->r_sharehead = 0;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				CIRCLEQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
#ifdef RMAN_DEBUG
				printf("allocating from the beginning\n");
#endif /* RMAN_DEBUG */
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, rv,
						      r_link);
			} else {
#ifdef RMAN_DEBUG
				printf("allocating at the end\n");
#endif /* RMAN_DEBUG */
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
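	/*
	 * Example (illustrative values): if one driver already holds
	 * [0x300, 0x307] with RF_SHAREABLE set, a second RF_SHAREABLE
	 * request for 8 items within [0x300, 0x307] is satisfied by the
	 * loop below, whereas a request for [0x304, 0x30b] is not, since
	 * the two regions would overlap without being identical.
	 */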
#ifdef RMAN_DEBUG
	printf("no unshared regions found\n");
#endif /* RMAN_DEBUG */
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
	     s = s->r_link.cqe_next) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = max(s->r_start, start);
		rend = min(s->r_end, max(start + count, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count) {
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT);
			if (rv == 0)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == 0) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
							M_RMAN, M_NOWAIT);
				if (s->r_sharehead == 0) {
					free(rv, M_RMAN);
					rv = 0;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = 0;
		}
	}

	simple_unlock(rm->rm_slock);
	return (rv);
}
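
/*
 * Illustrative note: a caller which passes RF_ACTIVE, e.g.
 *
 *	r = rman_reserve_resource(rm, start, end, count,
 *				  RF_TIMESHARE | RF_ACTIVE, dev);
 *
 * gets back either a resource which has already been activated or a
 * null pointer; a caller which omits RF_ACTIVE is expected to activate
 * the resource itself later, with rman_activate_resource() or (for
 * timeshared resources) rman_await_resource().
 */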

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == 0) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = r->r_sharehead->lh_first; s && ok;
	     s = s->r_sharelink.le_next) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	simple_lock(rm->rm_slock);
	rv = int_rman_activate_resource(rm, r, &whohas);
	simple_unlock(rm->rm_slock);
	return rv;
}

int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int	rv, s;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	simple_lock(rm->rm_slock);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* note: returns with rm_slock held */

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * splhigh hopefully will prevent a race between
		 * simple_unlock and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it.
		 */
		s = splhigh();
		whohas->r_flags |= RF_WANTED;
		simple_unlock(rm->rm_slock);
		rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
		if (rv) {
			splx(s);
			return rv;
		}
		simple_lock(rm->rm_slock);
		splx(s);
	}
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->r_rm;
	simple_lock(rm->rm_slock);
	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	simple_unlock(rm->rm_slock);
	return 0;
}
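
/*
 * Illustrative sketch ("res" is an invented name): a consumer of a
 * timeshared resource which cannot sleep can poll for exclusive use
 * with rman_activate_resource(), backing off on EBUSY, and hand the
 * resource back with rman_deactivate_resource() when it is done:
 *
 *	if (rman_activate_resource(res) != 0)
 *		return (EBUSY);
 *	... use the resource ...
 *	rman_deactivate_resource(res);
 *
 * The deactivation also wakes up any sharer sleeping on the resource
 * in rman_await_resource() above.
 */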

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		return EBUSY;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = r->r_sharehead->lh_first;
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			CIRCLEQ_INSERT_BEFORE(&rm->rm_list, r, s, r_link);
			CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (s->r_sharelink.le_next == 0) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
	 */
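	/*
	 * Worked example (numbers invented): releasing [0x140, 0x15f]
	 * while both [0x100, 0x13f] and [0x160, 0x1ff] are free collapses
	 * the three entries into a single free region [0x100, 0x1ff].
	 * If only one neighbour is free we merge with that one alone, and
	 * if neither is, the entry is simply marked unallocated below.
	 */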
	s = r->r_link.cqe_prev;
	t = r->r_link.cqe_next;

	if (s != (void *)&rm->rm_list && (s->r_flags & RF_ALLOCATED) == 0
	    && t != (void *)&rm->rm_list && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
		CIRCLEQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != (void *)&rm->rm_list
		   && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != (void *)&rm->rm_list
		   && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *r)
{
	int	rv;
	struct	rman *rm = r->r_rm;

	simple_lock(rm->rm_slock);
	rv = int_rman_release_resource(rm, r);
	simple_unlock(rm->rm_slock);
	return (rv);
}