/*-
 * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes,
 * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indices, which are 8 bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16 bit
 * values.  Currently on alpha you can get 250 or so 32-byte items and on x86
 * you can get 250 or so 16-byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
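 *
 * As a rough worked example (hypothetical numbers; assume a 4096-byte page
 * and roughly 40 bytes of embedded slab header for illustration): a 96-byte
 * item aligned to 8 bytes needs no padding, so rsize stays 96.  Each item
 * also costs one 8-bit free-list index, so about (4096 - 40) / 97 = 41
 * items fit per slab, leaving under 100 bytes of waste.  That is well below
 * UMA_MAX_WASTE (4096 / 10 = 409 bytes), so the slab header stays embedded
 * rather than going OFFPAGE.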
 *
 * Other potential space optimizations are storing the 8 bits of linkage in
 * space wasted between items due to alignment problems.  This may yield a
 * much better memory footprint for certain sizes of objects.  Another
 * alternative is to increase the UMA_SLAB_SIZE, or allow for dynamic slab
 * sizes.  I prefer dynamic slab sizes because we could stick with 8 bit
 * indices and only use large slab sizes for zones with a lot of waste per
 * slab.  This may create inefficiencies in the vm subsystem due to
 * fragmentation in the address space.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are done in the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */

/*
 * This is the representation for a normal (non-OFFPAGE) slab:
 *
 * i == item
 * s == slab pointer
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 *  ___________    ^
 * |slab header|   |
 * |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in the page mask */

#define UMA_BOOT_PAGES	64		/* Pages allocated for startup */

/* Max waste before going to off page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)

/*
 * I doubt there will be many cases where this is exceeded.  This is the
 * initial size of the hash table for uma_slabs that are managed off page.
 * This hash does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */

#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) & \
    (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink)
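
/*
 * For illustration only: a lookup with these macros follows the same
 * pattern as hash_sfind() below.  Given a keg and an item's base page
 * address, the bucket index and chain walk look like:
 *
 *	hval = UMA_HASH(&keg->uk_hash, mem);
 *	SLIST_FOREACH(slab, &keg->uk_hash.uh_slab_hash[hval], us_hlink)
 *		if (slab->us_data == mem)
 *			break;
 */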

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Align field or structure to cache line.
 */
#if defined(__amd64__)
#define UMA_ALIGN	__aligned(CACHE_LINE_SIZE)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
	u_int64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
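
/*
 * A simplified sketch (not the real implementation, which lives in
 * uma_core.c) of how a per-CPU cache satisfies an allocation: the
 * critical section pins the thread to one CPU, so no lock is needed.
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		item = bucket->ub_bucket[--bucket->ub_cnt];
 *		cache->uc_allocs++;
 *	}
 *	critical_exit();
 */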

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	struct mtx	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	const char	*uk_name;		/* Name of creating zone. */
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	u_int32_t	uk_recurse;	/* Allocation recursion count */
	u_int32_t	uk_align;	/* Alignment mask */
	u_int32_t	uk_pages;	/* Total page count */
	u_int32_t	uk_free;	/* Count of items free in slabs */
	u_int32_t	uk_size;	/* Requested size of each item */
	u_int32_t	uk_rsize;	/* Real size of each item */
	u_int32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	struct vm_object	*uk_obj;	/* Zone specific object */
	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uk_ppera;	/* pages per allocation from backend */
	u_int16_t	uk_ipers;	/* Items per slab */
	u_int32_t	uk_flags;	/* Internal flags */
};
typedef struct uma_keg	* uma_keg_t;

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab_head {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags see uma.h */
	u_int8_t	us_freecount;		/* How many are free? */
	u_int8_t	us_firstfree;		/* First free item index */
};

/* The standard slab structure */
struct uma_slab {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
	} us_freelist[1];			/* actual number bigger */
};
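
/*
 * Illustrative sketch of popping an item off the in-slab free list of
 * 8-bit indices (the real code is in uma_core.c): us_firstfree holds the
 * index of the first free item, and each us_freelist entry holds the
 * index of the next free item.
 *
 *	freei = slab->us_firstfree;
 *	slab->us_firstfree = slab->us_freelist[freei].us_item;
 *	item = slab->us_data + (keg->uk_rsize * freei);
 *	slab->us_freecount--;
 */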

/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items we maintain
 * reference counters for within the slab.
 */
struct uma_slab_refcnt {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
		u_int32_t	us_refcnt;
	} us_freelist[1];			/* actual number bigger */
};

#define	us_keg		us_head.us_keg
#define	us_link		us_head.us_type._us_link
#define	us_size		us_head.us_type._us_size
#define	us_hlink	us_head.us_hlink
#define	us_data		us_head.us_data
#define	us_flags	us_head.us_flags
#define	us_freecount	us_head.us_freecount
#define	us_firstfree	us_head.us_firstfree

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);

/*
 * These give us the size of one free item reference within our corresponding
 * uma_slab structures, so that our calculations during zone setup are correct
 * regardless of what the compiler decides to do with padding the structure
 * arrays within uma_slab.
 */
#define	UMA_FRITM_SZ	(sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
#define	UMA_FRITMREF_SZ	(sizeof(struct uma_slab_refcnt) - \
    sizeof(struct uma_slab_head))
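
/*
 * These sizes feed the items-per-slab calculation during keg setup.  As an
 * approximate sketch only (the exact logic in uma_core.c also accounts for
 * UMA_MAX_WASTE and the OFFPAGE fallback), an embedded-slab keg fits:
 *
 *	ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab_head)) /
 *	    (rsize + UMA_FRITM_SZ);
 */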

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t		kl_keg;
};
typedef struct uma_klink *uma_klink_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	const char	*uz_name;	/* Text name of the zone */
	struct mtx	*uz_lock;	/* Lock for the zone (keg's lock) */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */

	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
	struct uma_klink	uz_klink;	/* klink for first keg. */

	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */

	u_int32_t	uz_flags;	/* Flags inherited from kegs */
	u_int32_t	uz_size;	/* Size inherited from kegs */

	u_int64_t	uz_allocs UMA_ALIGN; /* Total number of allocations */
	u_int64_t	uz_frees;	/* Total number of frees */
	u_int64_t	uz_fails;	/* Total number of alloc failures */
	u_int64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_cnt can have */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_BUCKET	0x02000000	/* Bucket zone. */
#define	UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define	UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define	UMA_ZFLAG_PRIVALLOC	0x10000000	/* Use uz_allocf. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT	(UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | \
				    UMA_ZFLAG_BUCKET)

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define	KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)
#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lock)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lock)
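
/*
 * Usage sketch: uz_lock is a pointer at the backing keg's mutex, so
 * ZONE_LOCK() and KEG_LOCK() may contend on the same lock when several
 * zones share one keg.
 *
 *	ZONE_LOCK(zone);
 *	... manipulate the zone's bucket lists ...
 *	ZONE_UNLOCK(zone);
 */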

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;
	uma_slab_t slab;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	/* The slab pointer is stashed in the page's object field. */
	slab = (uma_slab_t)p->object;

	if (p->flags & PG_SLAB)
		return (slab);
	else
		return (NULL);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}

static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */