/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
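
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * hypothetical bus driver would typically embed a struct rman, initialize
 * it, hand it a range to manage, and then carve allocations out of it.
 * The rman name and the port range below are assumptions for the example.
 *
 *	static struct rman io_rman;
 *
 *	io_rman.rm_type = RMAN_ARRAY;
 *	io_rman.rm_descr = "example I/O ports";
 *	if (rman_init(&io_rman) != 0 ||
 *	    rman_manage_region(&io_rman, 0x0, 0xffff) != 0)
 *		panic("io_rman setup failed");
 *	...
 *	struct resource *res = rman_reserve_resource(&io_rman, 0x100,
 *	    0x1ff, 0x10, RF_ACTIVE, dev);
 */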

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource */
};

static int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define	DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0ul;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
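
/*
 * Illustrative sketch (assumed usage, not code from this file): because
 * rman_manage_region() coalesces adjacent free regions, a caller handing
 * over two abutting ranges ends up with a single free region:
 *
 *	rman_manage_region(&rm, 0x000, 0x0ff);
 *	rman_manage_region(&rm, 0x100, 0x1ff);
 *
 * The free list now holds one region [0x000, 0x1ff].  Overlapping ranges,
 * by contrast, are rejected with EBUSY.
 */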

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
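
/*
 * Illustrative sketch (assumed usage, not code from this file): a bus
 * driver holding an allocated range [0x100, 0x1ff] could grow it downward
 * to [0x0c0, 0x1ff], provided the region below it is free:
 *
 *	error = rman_adjust_resource(res, 0x0c0, 0x1ff);
 *
 * If the adjacent region is allocated or too small, EBUSY is returned and
 * the resource is left unchanged.
 */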

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
			    u_long count, u_long bound, u_int flags,
			    struct device *dev)
{
	u_int want_activate;
	struct resource_i *r, *s, *rv;
	u_long rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	    "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
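
/*
 * Illustrative sketch (assumed usage, not code from this file): to carve a
 * 0x100-byte region that is 0x100-aligned and does not cross a 0x1000
 * boundary out of the range [0x1000, 0x3fff]:
 *
 *	res = rman_reserve_resource_bound(&rm, 0x1000, 0x3fff, 0x100,
 *	    0x1000, rman_make_alignment_flags(0x100), dev);
 *
 * Passing bound == 0 (as rman_reserve_resource() does) disables the
 * boundary check entirely, since bmask then collapses to 0.
 */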

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
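
/*
 * A worked example (illustrative, not part of this file): for size 0x1000
 * (one bit set), the loop above stops at i = 12 and no correction is
 * applied, so the result encodes a 4 KB alignment.  For size 0x1800 (two
 * bits set), the loop stops at i = 12 but ~(1 << 12) & 0x1800 is nonzero,
 * so i becomes 13 -- i.e., ceil(log2(0x1800)) -- and an 8 KB alignment is
 * encoded instead.
 */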

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	int rman_idx, res_idx;
	struct rman *rm;
	struct resource_i *res;
	struct resource_i *sres;
	struct u_rman urm;
	struct u_resource ures;
	int error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		}
		else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
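
/*
 * Illustrative note (an assumption about typical consumers, not code from
 * this file): a userland tool can walk this interface by issuing sysctl
 * queries of the form { generation, rman-index, resource-index }, using a
 * resource-index of -1 to fetch the struct u_rman header for each manager
 * and then incrementing resource-index from 0 until ENOENT is returned.
 */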

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%lx-0x%lx full range)\n",
	    rm, rm->rm_descr, rm->rm_start, rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif