/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most resources are of this
 * type.  The second type is called a gauge (RMAN_GAUGE), and models
 * fungible resources (i.e., resources in which each instance is
 * indistinguishable from every other instance).  The principal
 * anticipated application of gauges is in the context of power
 * consumption, where a bus may have a specific power budget which all
 * attached devices share.  RMAN_GAUGE is not implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
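
/*
 * A minimal usage sketch (hypothetical bus driver; the names and region
 * values below are illustrative only, not taken from any real driver).
 * A bus driver describes the range it owns, hands it to the resource
 * manager, and then carves allocations out of it on behalf of children:
 *
 *	static struct rman irq_rman;
 *
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "example IRQ lines";
 *	irq_rman.rm_start = 0;
 *	irq_rman.rm_end = 15;
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("irq_rman setup failed");
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&irq_rman, 3, 3, 1,
 *	    RF_SHAREABLE | RF_ACTIVE, child);
 *	if (res == NULL)
 *		return (ENXIO);
 *	...
 *	rman_release_resource(res);
 */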

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `rman_res_t' rather than a fixed C integral type: historically they
 * were `unsigned long' (the largest integral type in ISO C (1990)),
 * which was too narrow for, e.g., 36-bit physical addresses on IA32
 * hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
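
/*
 * Note: consumers hold a pointer to the embedded `struct resource' (r_r),
 * not to the resource_i itself; its __r_i back-pointer, initialized in
 * int_alloc_resource() below, recovers the containing structure in the
 * rman_get_*()/rman_set_*() accessors.  This keeps the bookkeeping fields
 * opaque to drivers.
 */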

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

#define	DPRINTF(params)	do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx; /* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
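
/*
 * Illustrative behavior of the merging above (values hypothetical):
 * managing [0x0, 0xffff] and then [0x10000, 0x1ffff] in the same rman
 * leaves a single free region [0x0, 0x1ffff], because the new region is
 * exactly adjacent and both pieces are unallocated.  Managing
 * [0x8000, 0x17fff] afterwards would instead fail with EBUSY, since it
 * overlaps an existing region.
 */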

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
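
/*
 * For example (hypothetical values), a resource reserved as
 * [0x1000, 0x1fff] can be grown in place with
 *
 *	error = rman_adjust_resource(res, 0x1000, 0x2fff);
 *
 * which succeeds only if [0x2000, 0x2fff] is managed by the same rman
 * and currently unallocated; shrinking either end is always permitted.
 */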

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
			    rman_res_t count, rman_res_t bound, u_int flags,
			    device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
	       "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL) {
	    DPRINTF(("NULL list head\n"));
	} else {
	    DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link)) {
		DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
			r->r_end, start, count-1));
	}

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
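		/*
		 * Worked example (hypothetical request): for
		 * rstart = 0x10f1 with a 2^4 alignment (amask = 0xf),
		 * bound = 0x100 (bmask = ~0xff) and count = 0x20, the
		 * first pass rounds rstart up to 0x1100; since
		 * [0x1100, 0x111f] crosses no 0x100 boundary, the loop
		 * exits with rstart = 0x1100.
		 */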
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
		      rman_res_t count, u_int flags, device_t dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit is
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
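
/*
 * For example, rman_make_alignment_flags(0x1000) yields i = 12 (0x1000 is
 * a power of two), while rman_make_alignment_flags(0x1001) rounds up to
 * i = 13.  The encoded value is passed in the flags argument of
 * rman_reserve_resource_bound(), which decodes it with RF_ALIGNMENT() to
 * build the alignment mask.
 */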

void
rman_set_start(struct resource *r, rman_res_t start)
{

	r->__r_i->r_start = start;
}

rman_res_t
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, rman_res_t end)
{

	r->__r_i->r_end = end;
}

rman_res_t
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

	r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(struct resource *r)
{

	return (r->__r_i->r_irq_cookie);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation, the index
 * into the list of resource managers, and the resource index within
 * that manager (or -1 to return details on the manager itself).
 * Consumers iterate by incrementing the indices until ENOENT is
 * returned.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif