/* subr_rman.c — FreeBSD kernel resource manager (SVN revision 183054) */
1139804Simp/*-
240711Swollman * Copyright 1998 Massachusetts Institute of Technology
340711Swollman *
440711Swollman * Permission to use, copy, modify, and distribute this software and
540711Swollman * its documentation for any purpose and without fee is hereby
640711Swollman * granted, provided that both the above copyright notice and this
740711Swollman * permission notice appear in all copies, that both the above
840711Swollman * copyright notice and this permission notice appear in all
940711Swollman * supporting documentation, and that the name of M.I.T. not be used
1040711Swollman * in advertising or publicity pertaining to distribution of the
1140711Swollman * software without specific, written prior permission.  M.I.T. makes
1240711Swollman * no representations about the suitability of this software for any
1340711Swollman * purpose.  It is provided "as is" without express or implied
1440711Swollman * warranty.
15152543Syongari *
1640711Swollman * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
1740711Swollman * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
1840711Swollman * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
1940711Swollman * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
2040711Swollman * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2140711Swollman * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2240711Swollman * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
2340711Swollman * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2440711Swollman * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
2540711Swollman * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
2640711Swollman * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2740711Swollman * SUCH DAMAGE.
2840711Swollman */
2940711Swollman
3040711Swollman/*
3140711Swollman * The kernel resource manager.  This code is responsible for keeping track
3240711Swollman * of hardware resources which are apportioned out to various drivers.
3340711Swollman * It does not actually assign those resources, and it is not expected
3440711Swollman * that end-device drivers will call into this code directly.  Rather,
3540711Swollman * the code which implements the buses that those devices are attached to,
3640711Swollman * and the code which manages CPU resources, will call this code, and the
3740711Swollman * end-device drivers will make upcalls to that code to actually perform
3840711Swollman * the allocation.
3940711Swollman *
4040711Swollman * There are two sorts of resources managed by this code.  The first is
4140711Swollman * the more familiar array (RMAN_ARRAY) type; resources in this class
4240711Swollman * consist of a sequence of individually-allocatable objects which have
4340711Swollman * been numbered in some well-defined order.  Most of the resources
4440711Swollman * are of this type, as it is the most familiar.  The second type is
4540711Swollman * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
4640711Swollman * resources in which each instance is indistinguishable from every
4740711Swollman * other instance).  The principal anticipated application of gauges
4840711Swollman * is in the context of power consumption, where a bus may have a specific
4940711Swollman * power budget which all attached devices share.  RMAN_GAUGE is not
5040711Swollman * implemented yet.
5140711Swollman *
5240711Swollman * For array resources, we make one simplifying assumption: two clients
5340711Swollman * sharing the same resource must use the same range of indices.  That
5440711Swollman * is to say, sharing of overlapping-but-not-identical regions is not
5540711Swollman * permitted.
5640711Swollman */
5740711Swollman
58168791Sjhb#include "opt_ddb.h"
59168791Sjhb
60116182Sobrien#include <sys/cdefs.h>
61116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/subr_rman.c 183054 2008-09-15 22:45:14Z sam $");
62116182Sobrien
6340711Swollman#include <sys/param.h>
6440711Swollman#include <sys/systm.h>
6541304Sbde#include <sys/kernel.h>
66164881Sjhb#include <sys/limits.h>
6740711Swollman#include <sys/lock.h>
6840711Swollman#include <sys/malloc.h>
6971576Sjasone#include <sys/mutex.h>
7045720Speter#include <sys/bus.h>		/* XXX debugging */
7145720Speter#include <machine/bus.h>
7240711Swollman#include <sys/rman.h>
73102962Siwasaki#include <sys/sysctl.h>
7440711Swollman
75168791Sjhb#ifdef DDB
76168791Sjhb#include <ddb/ddb.h>
77168791Sjhb#endif
78168791Sjhb
79151037Sphk/*
80151037Sphk * We use a linked list rather than a bitmap because we need to be able to
81151037Sphk * represent potentially huge objects (like all of a processor's physical
82151037Sphk * address space).  That is also why the indices are defined to have type
83151037Sphk * `unsigned long' -- that being the largest integral type in ISO C (1990).
84151037Sphk * The 1999 version of C allows `long long'; we may need to switch to that
85151037Sphk * at some point in the future, particularly if we want to support 36-bit
86151037Sphk * addresses on IA32 hardware.
87151037Sphk */
/*
 * Internal representation of a resource region.  The public-facing
 * struct resource (r_r) is embedded first; its __r_i pointer refers
 * back to this structure (set in int_alloc_resource()).  All fields
 * are protected by the owning rman's rm_mtx.
 */
struct resource_i {
	struct resource		r_r;		/* public view; must be first */
	TAILQ_ENTRY(resource_i)	r_link;		/* link on rman's region list */
	LIST_ENTRY(resource_i)	r_sharelink;	/* link on sharers' list */
	LIST_HEAD(, resource_i)	*r_sharehead;	/* head of sharers' list, or NULL */
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;	/* RF_* flags */
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
101151037Sphk
102102962Siwasakiint     rman_debug = 0;
103102962SiwasakiTUNABLE_INT("debug.rman_debug", &rman_debug);
104102962SiwasakiSYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
105102962Siwasaki    &rman_debug, 0, "rman debug");
10659910Spaul
107102962Siwasaki#define DPRINTF(params) if (rman_debug) printf params
108102962Siwasaki
10945569Seivindstatic MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
11040711Swollman
/* Global list of every initialized rman, protected by rman_mtx. */
struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
/* Internal helpers; callers must hold the relevant rm->rm_mtx. */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);
11740711Swollman
118150523Sphkstatic __inline struct resource_i *
119150523Sphkint_alloc_resource(int malloc_flag)
120150523Sphk{
121150523Sphk	struct resource_i *r;
122150523Sphk
123150523Sphk	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
124150523Sphk	if (r != NULL) {
125150523Sphk		r->r_r.__r_i = r;
126150523Sphk	}
127150523Sphk	return (r);
128150523Sphk}
129150523Sphk
13040711Swollmanint
13140711Swollmanrman_init(struct rman *rm)
13240711Swollman{
133152543Syongari	static int once = 0;
13440711Swollman
13540711Swollman	if (once == 0) {
13640711Swollman		once = 1;
13740711Swollman		TAILQ_INIT(&rman_head);
13893818Sjhb		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
13940711Swollman	}
14040711Swollman
14140711Swollman	if (rm->rm_type == RMAN_UNINIT)
14240711Swollman		panic("rman_init");
14340711Swollman	if (rm->rm_type == RMAN_GAUGE)
14440711Swollman		panic("implement RMAN_GAUGE");
14540711Swollman
14668727Smckusick	TAILQ_INIT(&rm->rm_list);
14784781Sjhb	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
148152543Syongari	if (rm->rm_mtx == NULL)
14940711Swollman		return ENOMEM;
15093818Sjhb	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
15140711Swollman
15272200Sbmilekic	mtx_lock(&rman_mtx);
15340711Swollman	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
15472200Sbmilekic	mtx_unlock(&rman_mtx);
15540711Swollman	return 0;
15640711Swollman}
15740711Swollman
15840711Swollmanint
15940711Swollmanrman_manage_region(struct rman *rm, u_long start, u_long end)
16040711Swollman{
161162224Sjhb	struct resource_i *r, *s, *t;
16240711Swollman
163134040Snjl	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
164134021Snjl	    rm->rm_descr, start, end));
165150523Sphk	r = int_alloc_resource(M_NOWAIT);
166152543Syongari	if (r == NULL)
16740711Swollman		return ENOMEM;
16840711Swollman	r->r_start = start;
16940711Swollman	r->r_end = end;
17040711Swollman	r->r_rm = rm;
17140711Swollman
17272200Sbmilekic	mtx_lock(rm->rm_mtx);
173162224Sjhb
174162224Sjhb	/* Skip entries before us. */
175164881Sjhb	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
176164881Sjhb		if (s->r_end == ULONG_MAX)
177164881Sjhb			break;
178164881Sjhb		if (s->r_end + 1 >= r->r_start)
179164881Sjhb			break;
180164881Sjhb	}
18140711Swollman
182162224Sjhb	/* If we ran off the end of the list, insert at the tail. */
18368727Smckusick	if (s == NULL) {
18468727Smckusick		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
18540711Swollman	} else {
186162224Sjhb		/* Check for any overlap with the current region. */
187162224Sjhb		if (r->r_start <= s->r_end && r->r_end >= s->r_start)
188162224Sjhb			return EBUSY;
189162224Sjhb
190162224Sjhb		/* Check for any overlap with the next region. */
191162224Sjhb		t = TAILQ_NEXT(s, r_link);
192162224Sjhb		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start)
193162224Sjhb			return EBUSY;
194162224Sjhb
195162224Sjhb		/*
196162224Sjhb		 * See if this region can be merged with the next region.  If
197162224Sjhb		 * not, clear the pointer.
198162224Sjhb		 */
199162224Sjhb		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
200162224Sjhb			t = NULL;
201162224Sjhb
202162224Sjhb		/* See if we can merge with the current region. */
203162224Sjhb		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
204162224Sjhb			/* Can we merge all 3 regions? */
205162224Sjhb			if (t != NULL) {
206162224Sjhb				s->r_end = t->r_end;
207162224Sjhb				TAILQ_REMOVE(&rm->rm_list, t, r_link);
208162224Sjhb				free(r, M_RMAN);
209162224Sjhb				free(t, M_RMAN);
210162224Sjhb			} else {
211162224Sjhb				s->r_end = r->r_end;
212162224Sjhb				free(r, M_RMAN);
213162224Sjhb			}
214166932Sscottl		} else if (t != NULL) {
215166932Sscottl			/* Can we merge with just the next region? */
216166932Sscottl			t->r_start = r->r_start;
217166932Sscottl			free(r, M_RMAN);
218166932Sscottl		} else if (s->r_end < r->r_start) {
219166932Sscottl			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
220162224Sjhb		} else {
221166932Sscottl			TAILQ_INSERT_BEFORE(s, r, r_link);
222162224Sjhb		}
22340711Swollman	}
22440711Swollman
22572200Sbmilekic	mtx_unlock(rm->rm_mtx);
22640711Swollman	return 0;
22740711Swollman}
22840711Swollman
22940711Swollmanint
230159536Simprman_init_from_resource(struct rman *rm, struct resource *r)
231159536Simp{
232159536Simp	int rv;
233159536Simp
234159536Simp	if ((rv = rman_init(rm)) != 0)
235159536Simp		return (rv);
236159536Simp	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
237159536Simp}
238159536Simp
239159536Simpint
24040711Swollmanrman_fini(struct rman *rm)
24140711Swollman{
242150523Sphk	struct resource_i *r;
24340711Swollman
24472200Sbmilekic	mtx_lock(rm->rm_mtx);
24568727Smckusick	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
24645720Speter		if (r->r_flags & RF_ALLOCATED) {
24772200Sbmilekic			mtx_unlock(rm->rm_mtx);
24840711Swollman			return EBUSY;
24945720Speter		}
25040711Swollman	}
25140711Swollman
25240711Swollman	/*
25340711Swollman	 * There really should only be one of these if we are in this
25440711Swollman	 * state and the code is working properly, but it can't hurt.
25540711Swollman	 */
25668727Smckusick	while (!TAILQ_EMPTY(&rm->rm_list)) {
25768727Smckusick		r = TAILQ_FIRST(&rm->rm_list);
25868727Smckusick		TAILQ_REMOVE(&rm->rm_list, r, r_link);
25940711Swollman		free(r, M_RMAN);
26040711Swollman	}
26172200Sbmilekic	mtx_unlock(rm->rm_mtx);
26272200Sbmilekic	mtx_lock(&rman_mtx);
26340711Swollman	TAILQ_REMOVE(&rman_head, rm, rm_link);
26472200Sbmilekic	mtx_unlock(&rman_mtx);
26571576Sjasone	mtx_destroy(rm->rm_mtx);
26671576Sjasone	free(rm->rm_mtx, M_RMAN);
26740711Swollman
26840711Swollman	return 0;
26940711Swollman}
27040711Swollman
/*
 * Reserve a resource of `count' contiguous indices within [start, end]
 * from manager rm, honoring the alignment encoded in `flags' and the
 * boundary restriction `bound' (0 means no boundary).  On success the
 * matching free region is either claimed whole or split in two/three
 * pieces, and a pointer to the public resource is returned; on failure
 * NULL is returned.  If RF_ACTIVE was requested, the resource is also
 * activated atomically, and the whole reservation is undone if that
 * activation fails.
 */
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	/* Remember whether to activate; strip RF_ACTIVE for region matching. */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	/* Skip regions that end before the requested window begins. */
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/* amask: low bits that must be zero in an aligned start address. */
	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				/* Exact fit: claim the region in place. */
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			/* First sharer: create the share list lazily. */
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
48540711Swollman
48688372Stmmstruct resource *
48788372Stmmrman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
48888372Stmm		      u_int flags, struct device *dev)
48988372Stmm{
49088372Stmm
49188372Stmm	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
49288372Stmm	    dev));
49388372Stmm}
49488372Stmm
49540711Swollmanstatic int
496150523Sphkint_rman_activate_resource(struct rman *rm, struct resource_i *r,
497150523Sphk			   struct resource_i **whohas)
49840711Swollman{
499150523Sphk	struct resource_i *s;
50040711Swollman	int ok;
50140711Swollman
50240711Swollman	/*
50340711Swollman	 * If we are not timesharing, then there is nothing much to do.
50440711Swollman	 * If we already have the resource, then there is nothing at all to do.
50540711Swollman	 * If we are not on a sharing list with anybody else, then there is
50640711Swollman	 * little to do.
50740711Swollman	 */
50840711Swollman	if ((r->r_flags & RF_TIMESHARE) == 0
50940711Swollman	    || (r->r_flags & RF_ACTIVE) != 0
510152543Syongari	    || r->r_sharehead == NULL) {
51140711Swollman		r->r_flags |= RF_ACTIVE;
51240711Swollman		return 0;
51340711Swollman	}
51440711Swollman
51540711Swollman	ok = 1;
51653225Sphk	for (s = LIST_FIRST(r->r_sharehead); s && ok;
51753225Sphk	     s = LIST_NEXT(s, r_sharelink)) {
51840711Swollman		if ((s->r_flags & RF_ACTIVE) != 0) {
51940711Swollman			ok = 0;
52040711Swollman			*whohas = s;
52140711Swollman		}
52240711Swollman	}
52340711Swollman	if (ok) {
52440711Swollman		r->r_flags |= RF_ACTIVE;
52540711Swollman		return 0;
52640711Swollman	}
52740711Swollman	return EBUSY;
52840711Swollman}
52940711Swollman
53040711Swollmanint
531150523Sphkrman_activate_resource(struct resource *re)
53240711Swollman{
53340711Swollman	int rv;
534150523Sphk	struct resource_i *r, *whohas;
53540711Swollman	struct rman *rm;
53640711Swollman
537150523Sphk	r = re->__r_i;
53840711Swollman	rm = r->r_rm;
53972200Sbmilekic	mtx_lock(rm->rm_mtx);
54040711Swollman	rv = int_rman_activate_resource(rm, r, &whohas);
54172200Sbmilekic	mtx_unlock(rm->rm_mtx);
54240711Swollman	return rv;
54340711Swollman}
54440711Swollman
/*
 * Activate a timeshared resource, sleeping until the current holder
 * releases it if necessary.  pri/timo are passed through to msleep().
 *
 * Lock contract: on success (return 0) this returns WITH rm->rm_mtx
 * HELD; on msleep() failure (e.g. EWOULDBLOCK, EINTR) the mutex is
 * dropped before returning the error.
 */
int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		/* EBUSY implies a sharer holds it; there must be a list. */
		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		/* Ask the current holder to wake us when it deactivates. */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
57040711Swollman
57145720Speterstatic int
572150523Sphkint_rman_deactivate_resource(struct resource_i *r)
57340711Swollman{
57440711Swollman
57540711Swollman	r->r_flags &= ~RF_ACTIVE;
57640711Swollman	if (r->r_flags & RF_WANTED) {
57740711Swollman		r->r_flags &= ~RF_WANTED;
57840711Swollman		wakeup(r->r_sharehead);
57940711Swollman	}
58045720Speter	return 0;
58145720Speter}
58245720Speter
58345720Speterint
58445720Speterrman_deactivate_resource(struct resource *r)
58545720Speter{
58645720Speter	struct	rman *rm;
58745720Speter
588150523Sphk	rm = r->__r_i->r_rm;
58972200Sbmilekic	mtx_lock(rm->rm_mtx);
590150523Sphk	int_rman_deactivate_resource(r->__r_i);
59172200Sbmilekic	mtx_unlock(rm->rm_mtx);
59240711Swollman	return 0;
59340711Swollman}
59440711Swollman
/*
 * Release a resource back to its manager.  Caller holds rm->rm_mtx.
 * A shared resource is simply removed from its share list (tearing
 * the list down when only one sharer remains); an unshared resource's
 * region is coalesced with any exactly-adjacent free neighbors, or
 * just marked free if neither neighbor can be merged.  Always
 * returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* Promote s to the list's representative on rm_list. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
68740711Swollman
68840711Swollmanint
689150523Sphkrman_release_resource(struct resource *re)
69040711Swollman{
69140711Swollman	int	rv;
692150523Sphk	struct	resource_i *r;
693150523Sphk	struct	rman *rm;
69440711Swollman
695150523Sphk	r = re->__r_i;
696150523Sphk	rm = r->r_rm;
69772200Sbmilekic	mtx_lock(rm->rm_mtx);
69840711Swollman	rv = int_rman_release_resource(rm, r);
69972200Sbmilekic	mtx_unlock(rm->rm_mtx);
70040711Swollman	return (rv);
70140711Swollman}
70267261Simp
70367261Simpuint32_t
70467261Simprman_make_alignment_flags(uint32_t size)
70567261Simp{
70667261Simp	int	i;
70767261Simp
70867425Simp	/*
70967425Simp	 * Find the hightest bit set, and add one if more than one bit
71067425Simp	 * set.  We're effectively computing the ceil(log2(size)) here.
71167425Simp	 */
71288372Stmm	for (i = 31; i > 0; i--)
71367425Simp		if ((1 << i) & size)
71467425Simp			break;
71567425Simp	if (~(1 << i) & size)
71667425Simp		i++;
71767261Simp
71867261Simp	return(RF_ALIGNMENT_LOG2(i));
71967425Simp}
720107296Simp
/* Overwrite a resource's start index (bus-layer adjustment use only). */
void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

/* First index covered by the resource. */
u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

/* Overwrite a resource's end index (bus-layer adjustment use only). */
void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

/* Last index covered by the resource (inclusive). */
u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

/* Number of indices covered: end - start + 1. */
u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

/* Current RF_* flags of the resource. */
u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}
756107296Simp
/* Record the kernel virtual address the bus mapped for this resource. */
void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

/* Kernel virtual address previously set with rman_set_virtual(). */
void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

/* Record the bus_space tag for accessing this resource. */
void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

/* bus_space tag previously set with rman_set_bustag(). */
bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

/* Record the bus_space handle for accessing this resource. */
void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

/* bus_space handle previously set with rman_set_bushandle(). */
bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}
792107296Simp
793107296Simpvoid
794107296Simprman_set_rid(struct resource *r, int rid)
795107296Simp{
796150523Sphk	r->__r_i->r_rid = rid;
797107296Simp}
798107296Simp
799182162Sjhbint
800182162Sjhbrman_get_rid(struct resource *r)
801131414Simp{
802182162Sjhb	return (r->__r_i->r_rid);
803131414Simp}
804131414Simp
805131414Simpvoid
806182162Sjhbrman_set_device(struct resource *r, struct device *dev)
807131414Simp{
808182162Sjhb	r->__r_i->r_dev = dev;
809131414Simp}
810131414Simp
811110753Simpstruct device *
812110753Simprman_get_device(struct resource *r)
813110753Simp{
814150523Sphk	return (r->__r_i->r_dev);
815110753Simp}
816144071Sphk
817150547Sphkint
818150547Sphkrman_is_region_manager(struct resource *r, struct rman *rm)
819150547Sphk{
820150547Sphk
821150547Sphk	return (r->__r_i->r_rm == rm);
822150547Sphk}
823150547Sphk
/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 *
 * The sysctl name is (generation, rman_idx, res_idx):
 *   - name[0] must match the current bus data generation;
 *   - name[1] selects the rman_idx'th manager on rman_head;
 *   - name[2] == -1 exports a "struct u_rman" describing the manager,
 *     otherwise it exports a "struct u_resource" for the res_idx'th
 *     entry on that manager's resource list.
 * Returns 0 on success, EINVAL on a malformed or stale request, and
 * ENOENT when either index runs past the end of its list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	/* Reject requests made against a stale snapshot of the bus tree. */
	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	/* TAILQ_FOREACH leaves rm NULL when the index ran off the list. */
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 *
	 * NOTE(review): rm is used below after rman_mtx is dropped;
	 * presumably managers are never torn down while this handler can
	 * run — confirm against rman_fini/callers.
	 */
	if (res_idx == -1) {
		/* Stage the reply in a zeroed local, then copy out. */
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				/* "name<unit>" when attached, else a marker. */
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			/*
			 * Drop the list lock before SYSCTL_OUT —
			 * presumably because the copy-out may sleep;
			 * the snapshot in ures is already complete.
			 */
			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
913144071Sphk
/* Read-only hw.bus.rman sysctl node, served by sysctl_rman above. */
SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
916168791Sjhb
917168791Sjhb#ifdef DDB
918168791Sjhbstatic void
919168791Sjhbdump_rman(struct rman *rm)
920168791Sjhb{
921168791Sjhb	struct resource_i *r;
922168791Sjhb	const char *devname;
923168791Sjhb
924168791Sjhb	if (db_pager_quit)
925168791Sjhb		return;
926168791Sjhb	db_printf("rman: %s\n", rm->rm_descr);
927168791Sjhb	db_printf("    0x%lx-0x%lx (full range)\n", rm->rm_start, rm->rm_end);
928168791Sjhb	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
929168791Sjhb		if (r->r_dev != NULL) {
930168791Sjhb			devname = device_get_nameunit(r->r_dev);
931168791Sjhb			if (devname == NULL)
932168791Sjhb				devname = "nomatch";
933168791Sjhb		} else
934168791Sjhb			devname = NULL;
935168791Sjhb		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
936168791Sjhb		if (devname != NULL)
937168791Sjhb			db_printf("(%s)\n", devname);
938168791Sjhb		else
939168791Sjhb			db_printf("----\n");
940168791Sjhb		if (db_pager_quit)
941168791Sjhb			return;
942168791Sjhb	}
943168791Sjhb}
944168791Sjhb
/* DDB "show rman <addr>": dump the rman at the given kernel address. */
DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr)
		dump_rman((struct rman *)addr);
}
951168791Sjhb
/*
 * DDB "show all rman" (aliased as "show allrman"): walk rman_head and
 * dump every registered resource manager.  No locking — DDB runs with
 * the rest of the system stopped.
 */
DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link)
		dump_rman(rm);
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
960168791Sjhb#endif
961