uma_core.c revision 260306
1/*-
2 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * uma_core.c  Implementation of the Universal Memory allocator
31 *
32 * This allocator is intended to replace the multitude of similar object caches
33 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
34 * efficient.  A primary design goal is to return unused memory to the rest of
35 * the system.  This will make the system as a whole more flexible due to the
36 * ability to move memory to subsystems which most need it instead of leaving
37 * pools of reserved memory unused.
38 *
39 * The basic ideas stem from similar slab/zone based allocators whose algorithms
40 * are well known.
41 *
42 */
43
44/*
45 * TODO:
46 *	- Improve memory usage for large allocations
47 *	- Investigate cache size adjustments
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: stable/10/sys/vm/uma_core.c 260306 2014-01-04 23:43:18Z mav $");
52
53/* I should really use ktr.. */
54/*
55#define UMA_DEBUG 1
56#define UMA_DEBUG_ALLOC 1
57#define UMA_DEBUG_ALLOC_1 1
58*/
59
60#include "opt_ddb.h"
61#include "opt_param.h"
62#include "opt_vm.h"
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/bitset.h>
67#include <sys/kernel.h>
68#include <sys/types.h>
69#include <sys/queue.h>
70#include <sys/malloc.h>
71#include <sys/ktr.h>
72#include <sys/lock.h>
73#include <sys/sysctl.h>
74#include <sys/mutex.h>
75#include <sys/proc.h>
76#include <sys/rwlock.h>
77#include <sys/sbuf.h>
78#include <sys/sched.h>
79#include <sys/smp.h>
80#include <sys/vmmeter.h>
81
82#include <vm/vm.h>
83#include <vm/vm_object.h>
84#include <vm/vm_page.h>
85#include <vm/vm_pageout.h>
86#include <vm/vm_param.h>
87#include <vm/vm_map.h>
88#include <vm/vm_kern.h>
89#include <vm/vm_extern.h>
90#include <vm/uma.h>
91#include <vm/uma_int.h>
92#include <vm/uma_dbg.h>
93
94#include <ddb/ddb.h>
95
96#ifdef DEBUG_MEMGUARD
97#include <vm/memguard.h>
98#endif
99
100/*
101 * This is the zone and keg from which all zones are spawned.  The idea is that
102 * even the zone & keg heads are allocated from the allocator, so we use the
103 * bss section to bootstrap us.
104 */
105static struct uma_keg masterkeg;
106static struct uma_zone masterzone_k;
107static struct uma_zone masterzone_z;
108static uma_zone_t kegs = &masterzone_k;
109static uma_zone_t zones = &masterzone_z;
110
111/* This is the zone from which all of uma_slab_t's are allocated. */
112static uma_zone_t slabzone;
113static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
114
115/*
116 * The initial hash tables come out of this zone so they can be allocated
117 * prior to malloc coming up.
118 */
119static uma_zone_t hashzone;
120
121/* The boot-time adjusted value for cache line alignment. */
122int uma_align_cache = 64 - 1;
123
124static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
125
126/*
127 * Are we allowed to allocate buckets?
128 */
129static int bucketdisable = 1;
130
131/* Linked list of all kegs in the system */
132static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
133
134/* Linked list of all cache-only zones in the system */
135static LIST_HEAD(,uma_zone) uma_cachezones =
136    LIST_HEAD_INITIALIZER(uma_cachezones);
137
138/* This mutex protects the keg list */
139static struct mtx_padalign uma_mtx;
140
141/* Linked list of boot time pages */
142static LIST_HEAD(,uma_slab) uma_boot_pages =
143    LIST_HEAD_INITIALIZER(uma_boot_pages);
144
145/* This mutex protects the boot time pages list */
146static struct mtx_padalign uma_boot_pages_mtx;
147
148/* Is the VM done starting up? */
149static int booted = 0;
150#define	UMA_STARTUP	1
151#define	UMA_STARTUP2	2
152
153/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
154static const u_int uma_max_ipers = SLAB_SETSIZE;
155
156/*
157 * Only mbuf clusters use ref zones.  Just provide enough references
158 * to support the one user.  New code should not use the ref facility.
159 */
160static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
161
162/*
163 * This is the handle used to schedule events that need to happen
164 * outside of the allocation fast path.
165 */
166static struct callout uma_callout;
167#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
168
169/*
170 * This structure is passed as the zone ctor arg so that I don't have to create
171 * a special allocation function just for zones.
172 */
173struct uma_zctor_args {
174	const char *name;
175	size_t size;
176	uma_ctor ctor;
177	uma_dtor dtor;
178	uma_init uminit;
179	uma_fini fini;
180	uma_import import;
181	uma_release release;
182	void *arg;
183	uma_keg_t keg;
184	int align;
185	uint32_t flags;
186};
187
188struct uma_kctor_args {
189	uma_zone_t zone;
190	size_t size;
191	uma_init uminit;
192	uma_fini fini;
193	int align;
194	uint32_t flags;
195};
196
197struct uma_bucket_zone {
198	uma_zone_t	ubz_zone;
199	char		*ubz_name;
200	int		ubz_entries;	/* Number of items it can hold. */
201	int		ubz_maxsize;	/* Maximum allocation size per-item. */
202};
203
204/*
205 * Compute the actual number of bucket entries so that buckets pack into
206 * power-of-two sizes for more efficient space utilization.
207 */
208#define	BUCKET_SIZE(n)						\
209    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
210
211#define	BUCKET_MAX	BUCKET_SIZE(128)
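/*
 * Illustrative sketch (editorial note, not part of the original source):
 * assuming LP64 8-byte pointers and a struct uma_bucket header occupying
 * three pointer slots (24 bytes), BUCKET_SIZE(16) = ((8 * 16) - 24) / 8 = 13
 * item pointers, so the header plus its item array fill exactly the 16
 * pointer-sized words that the "16 Bucket" zone name below suggests.
 */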
212
213struct uma_bucket_zone bucket_zones[] = {
214	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
215	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
216	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
217	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
218	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
219	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
220	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
221	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
222	{ NULL, NULL, 0}
223};
224
225/*
226 * Flags and enumerations to be passed to internal functions.
227 */
228enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
229
230/* Prototypes.. */
231
232static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
233static void *page_alloc(uma_zone_t, int, uint8_t *, int);
234static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
235static void page_free(void *, int, uint8_t);
236static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
237static void cache_drain(uma_zone_t);
238static void bucket_drain(uma_zone_t, uma_bucket_t);
239static void bucket_cache_drain(uma_zone_t zone);
240static int keg_ctor(void *, int, void *, int);
241static void keg_dtor(void *, int, void *);
242static int zone_ctor(void *, int, void *, int);
243static void zone_dtor(void *, int, void *);
244static int zero_init(void *, int, int);
245static void keg_small_init(uma_keg_t keg);
246static void keg_large_init(uma_keg_t keg);
247static void zone_foreach(void (*zfunc)(uma_zone_t));
248static void zone_timeout(uma_zone_t zone);
249static int hash_alloc(struct uma_hash *);
250static int hash_expand(struct uma_hash *, struct uma_hash *);
251static void hash_free(struct uma_hash *hash);
252static void uma_timeout(void *);
253static void uma_startup3(void);
254static void *zone_alloc_item(uma_zone_t, void *, int);
255static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
256static void bucket_enable(void);
257static void bucket_init(void);
258static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
259static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
260static void bucket_zone_drain(void);
261static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
262static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
263static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
264static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
265static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
266static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
267    uma_fini fini, int align, uint32_t flags);
268static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
269static void zone_release(uma_zone_t zone, void **bucket, int cnt);
270
271void uma_print_zone(uma_zone_t);
272void uma_print_stats(void);
273static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
274static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
275
276SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
277
278SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
279    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
280
281SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
282    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
283
284static int zone_warnings = 1;
285TUNABLE_INT("vm.zone_warnings", &zone_warnings);
286SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
287    "Warn when UMA zones become full");
288
289/*
290 * This routine checks to see whether or not it's safe to enable buckets.
291 */
292static void
293bucket_enable(void)
294{
295	bucketdisable = vm_page_count_min();
296}
297
298/*
299 * Initialize bucket_zones, the array of zones of buckets of various sizes.
300 *
301 * For each zone, calculate the memory required for each bucket, consisting
302 * of the header and an array of pointers.
303 */
304static void
305bucket_init(void)
306{
307	struct uma_bucket_zone *ubz;
308	int size;
309	int i;
310
311	for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
312		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
313		size += sizeof(void *) * ubz->ubz_entries;
314		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
315		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
316		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
317	}
318}
319
320/*
321 * Given a desired number of entries for a bucket, return the zone from which
322 * to allocate the bucket.
323 */
324static struct uma_bucket_zone *
325bucket_zone_lookup(int entries)
326{
327	struct uma_bucket_zone *ubz;
328
329	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
330		if (ubz->ubz_entries >= entries)
331			return (ubz);
332	ubz--;
333	return (ubz);
334}
335
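/*
 * Pick a bucket entry count for items of the given size.  Worked example
 * (editorial, based on the bucket_zones[] table above): a 2000-byte item
 * passes the 4096-, 3072- and 2048-byte limits, stops at the 1536-byte
 * entry and steps back, so it uses the "8 Bucket" entry count.  Items
 * larger than 4096 bytes get a count scaled down proportionally from the
 * first table entry, but never less than 1.
 */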
336static int
337bucket_select(int size)
338{
339	struct uma_bucket_zone *ubz;
340
341	ubz = &bucket_zones[0];
342	if (size > ubz->ubz_maxsize)
343		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
344
345	for (; ubz->ubz_entries != 0; ubz++)
346		if (ubz->ubz_maxsize < size)
347			break;
348	ubz--;
349	return (ubz->ubz_entries);
350}
351
352static uma_bucket_t
353bucket_alloc(uma_zone_t zone, void *udata, int flags)
354{
355	struct uma_bucket_zone *ubz;
356	uma_bucket_t bucket;
357
358	/*
359	 * This is to stop us from allocating per cpu buckets while we're
360	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
361	 * boot pages.  This also prevents us from allocating buckets in
362	 * low memory situations.
363	 */
364	if (bucketdisable)
365		return (NULL);
366	/*
367	 * To limit bucket recursion we store the original zone flags
368	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
369	 * NOVM flag to persist even through deep recursions.  We also
370	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
371	 * a bucket for a bucket zone so we do not allow infinite bucket
372	 * recursion.  This cookie will even persist to frees of unused
373	 * buckets via the allocation path or bucket allocations in the
374	 * free path.
375	 */
376	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
377		udata = (void *)(uintptr_t)zone->uz_flags;
378	else {
379		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
380			return (NULL);
381		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
382	}
383	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
384		flags |= M_NOVM;
385	ubz = bucket_zone_lookup(zone->uz_count);
386	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
387	if (bucket) {
388#ifdef INVARIANTS
389		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
390#endif
391		bucket->ub_cnt = 0;
392		bucket->ub_entries = ubz->ubz_entries;
393	}
394
395	return (bucket);
396}
397
398static void
399bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
400{
401	struct uma_bucket_zone *ubz;
402
403	KASSERT(bucket->ub_cnt == 0,
404	    ("bucket_free: Freeing a non free bucket."));
405	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
406		udata = (void *)(uintptr_t)zone->uz_flags;
407	ubz = bucket_zone_lookup(bucket->ub_entries);
408	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
409}
410
411static void
412bucket_zone_drain(void)
413{
414	struct uma_bucket_zone *ubz;
415
416	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
417		zone_drain(ubz->ubz_zone);
418}
419
420static void
421zone_log_warning(uma_zone_t zone)
422{
423	static const struct timeval warninterval = { 300, 0 };
424
425	if (!zone_warnings || zone->uz_warning == NULL)
426		return;
427
428	if (ratecheck(&zone->uz_ratecheck, &warninterval))
429		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
430}
431
432static void
433zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
434{
435	uma_klink_t klink;
436
437	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
438		kegfn(klink->kl_keg);
439}
440
441/*
442 * Routine called by timeout which is used to fire off some time interval
443 * based calculations.  (stats, hash size, etc.)
444 *
445 * Arguments:
446 *	arg   Unused
447 *
448 * Returns:
449 *	Nothing
450 */
451static void
452uma_timeout(void *unused)
453{
454	bucket_enable();
455	zone_foreach(zone_timeout);
456
457	/* Reschedule this event */
458	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
459}
460
461/*
462 * Routine to perform timeout driven calculations.  This expands the
463 * hashes and does per cpu statistics aggregation.
464 *
465 *  Returns nothing.
466 */
467static void
468keg_timeout(uma_keg_t keg)
469{
470
471	KEG_LOCK(keg);
472	/*
473	 * Expand the keg hash table.
474	 *
475	 * This is done if the number of slabs is larger than the hash size.
476	 * What I'm trying to do here is completely reduce collisions.  This
477	 * may be a little aggressive.  Should I allow for two collisions max?
478	 */
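	/*
	 * Illustrative example: for a keg with uk_ppera == 1, once the keg
	 * holds as many slabs as there are hash buckets, the hash table is
	 * replaced with one twice the size (hash_alloc) and all slabs are
	 * rehashed into it (hash_expand).
	 */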
479	if (keg->uk_flags & UMA_ZONE_HASH &&
480	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
481		struct uma_hash newhash;
482		struct uma_hash oldhash;
483		int ret;
484
485		/*
486		 * This is so involved because allocating and freeing
487		 * while the keg lock is held will lead to deadlock.
488		 * I have to do everything in stages and check for
489		 * races.
490		 */
491		newhash = keg->uk_hash;
492		KEG_UNLOCK(keg);
493		ret = hash_alloc(&newhash);
494		KEG_LOCK(keg);
495		if (ret) {
496			if (hash_expand(&keg->uk_hash, &newhash)) {
497				oldhash = keg->uk_hash;
498				keg->uk_hash = newhash;
499			} else
500				oldhash = newhash;
501
502			KEG_UNLOCK(keg);
503			hash_free(&oldhash);
504			return;
505		}
506	}
507	KEG_UNLOCK(keg);
508}
509
510static void
511zone_timeout(uma_zone_t zone)
512{
513
514	zone_foreach_keg(zone, &keg_timeout);
515}
516
517/*
518 * Allocate and zero fill the next sized hash table from the appropriate
519 * backing store.
520 *
521 * Arguments:
522 *	hash  A new hash structure with the old hash size in uh_hashsize
523 *
524 * Returns:
525 *	1 on success and 0 on failure.
526 */
527static int
528hash_alloc(struct uma_hash *hash)
529{
530	int oldsize;
531	int alloc;
532
533	oldsize = hash->uh_hashsize;
534
535	/* We're just going to go to a power of two greater */
536	if (oldsize)  {
537		hash->uh_hashsize = oldsize * 2;
538		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
539		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
540		    M_UMAHASH, M_NOWAIT);
541	} else {
542		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
543		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
544		    M_WAITOK);
545		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
546	}
547	if (hash->uh_slab_hash) {
548		bzero(hash->uh_slab_hash, alloc);
549		hash->uh_hashmask = hash->uh_hashsize - 1;
550		return (1);
551	}
552
553	return (0);
554}
555
556/*
557 * Expands the hash table for HASH zones.  This is done from zone_timeout
558 * to reduce collisions.  This must not be done in the regular allocation
559 * path, otherwise, we can recurse on the vm while allocating pages.
560 *
561 * Arguments:
562 *	oldhash  The hash you want to expand
563 *	newhash  The hash structure for the new table
564 *
565 * Returns:
566 *	Nothing
567 *
568 * Discussion:
569 */
570static int
571hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
572{
573	uma_slab_t slab;
574	int hval;
575	int i;
576
577	if (!newhash->uh_slab_hash)
578		return (0);
579
580	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
581		return (0);
582
583	/*
584	 * I need to investigate hash algorithms for resizing without a
585	 * full rehash.
586	 */
587
588	for (i = 0; i < oldhash->uh_hashsize; i++)
589		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
590			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
591			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
592			hval = UMA_HASH(newhash, slab->us_data);
593			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
594			    slab, us_hlink);
595		}
596
597	return (1);
598}
599
600/*
601 * Free the hash bucket to the appropriate backing store.
602 *
603 * Arguments:
604 *	slab_hash  The hash bucket we're freeing
605 *	hashsize   The number of entries in that hash bucket
606 *
607 * Returns:
608 *	Nothing
609 */
610static void
611hash_free(struct uma_hash *hash)
612{
613	if (hash->uh_slab_hash == NULL)
614		return;
615	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
616		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
617	else
618		free(hash->uh_slab_hash, M_UMAHASH);
619}
620
621/*
622 * Frees all outstanding items in a bucket
623 *
624 * Arguments:
625 *	zone   The zone to free to, must be unlocked.
626 *	bucket The free/alloc bucket with items, cpu queue must be locked.
627 *
628 * Returns:
629 *	Nothing
630 */
631
632static void
633bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
634{
635	int i;
636
637	if (bucket == NULL)
638		return;
639
640	if (zone->uz_fini)
641		for (i = 0; i < bucket->ub_cnt; i++)
642			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
643	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
644	bucket->ub_cnt = 0;
645}
646
647/*
648 * Drains the per cpu caches for a zone.
649 *
650 * NOTE: This may only be called while the zone is being torn down, and not
651 * during normal operation.  This is necessary in order that we do not have
652 * to migrate CPUs to drain the per-CPU caches.
653 *
654 * Arguments:
655 *	zone     The zone to drain, must be unlocked.
656 *
657 * Returns:
658 *	Nothing
659 */
660static void
661cache_drain(uma_zone_t zone)
662{
663	uma_cache_t cache;
664	int cpu;
665
666	/*
667	 * XXX: It is safe to not lock the per-CPU caches, because we're
668	 * tearing down the zone anyway.  I.e., there will be no further use
669	 * of the caches at this point.
670	 *
671	 * XXX: It would be good to be able to assert that the zone is being
672	 * torn down to prevent improper use of cache_drain().
673	 *
674	 * XXX: We lock the zone before passing into bucket_cache_drain() as
675	 * it is used elsewhere.  Should the tear-down path be made special
676	 * there in some form?
677	 */
678	CPU_FOREACH(cpu) {
679		cache = &zone->uz_cpu[cpu];
680		bucket_drain(zone, cache->uc_allocbucket);
681		bucket_drain(zone, cache->uc_freebucket);
682		if (cache->uc_allocbucket != NULL)
683			bucket_free(zone, cache->uc_allocbucket, NULL);
684		if (cache->uc_freebucket != NULL)
685			bucket_free(zone, cache->uc_freebucket, NULL);
686		cache->uc_allocbucket = cache->uc_freebucket = NULL;
687	}
688	ZONE_LOCK(zone);
689	bucket_cache_drain(zone);
690	ZONE_UNLOCK(zone);
691}
692
693static void
694cache_shrink(uma_zone_t zone)
695{
696
697	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
698		return;
699
700	ZONE_LOCK(zone);
701	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
702	ZONE_UNLOCK(zone);
703}
704
705static void
706cache_drain_safe_cpu(uma_zone_t zone)
707{
708	uma_cache_t cache;
709	uma_bucket_t b1, b2;
710
711	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
712		return;
713
714	b1 = b2 = NULL;
715	ZONE_LOCK(zone);
716	critical_enter();
717	cache = &zone->uz_cpu[curcpu];
718	if (cache->uc_allocbucket) {
719		if (cache->uc_allocbucket->ub_cnt != 0)
720			LIST_INSERT_HEAD(&zone->uz_buckets,
721			    cache->uc_allocbucket, ub_link);
722		else
723			b1 = cache->uc_allocbucket;
724		cache->uc_allocbucket = NULL;
725	}
726	if (cache->uc_freebucket) {
727		if (cache->uc_freebucket->ub_cnt != 0)
728			LIST_INSERT_HEAD(&zone->uz_buckets,
729			    cache->uc_freebucket, ub_link);
730		else
731			b2 = cache->uc_freebucket;
732		cache->uc_freebucket = NULL;
733	}
734	critical_exit();
735	ZONE_UNLOCK(zone);
736	if (b1)
737		bucket_free(zone, b1, NULL);
738	if (b2)
739		bucket_free(zone, b2, NULL);
740}
741
742/*
743 * Safely drain the per-CPU caches of a zone (or all zones) into the zone bucket cache.
744 * This is an expensive call because it needs to bind to all CPUs
745 * one by one and enter a critical section on each of them in order
746 * to safely access their cache buckets.
747 * Zone lock must not be held when calling this function.
748 */
749static void
750cache_drain_safe(uma_zone_t zone)
751{
752	int cpu;
753
754	/*
755	 * Polite bucket size shrinking was not enough, shrink aggressively.
756	 */
757	if (zone)
758		cache_shrink(zone);
759	else
760		zone_foreach(cache_shrink);
761
762	CPU_FOREACH(cpu) {
763		thread_lock(curthread);
764		sched_bind(curthread, cpu);
765		thread_unlock(curthread);
766
767		if (zone)
768			cache_drain_safe_cpu(zone);
769		else
770			zone_foreach(cache_drain_safe_cpu);
771	}
772	thread_lock(curthread);
773	sched_unbind(curthread);
774	thread_unlock(curthread);
775}
776
777/*
778 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
779 */
780static void
781bucket_cache_drain(uma_zone_t zone)
782{
783	uma_bucket_t bucket;
784
785	/*
786	 * Drain the bucket queues and free the buckets; we just keep two per
787	 * cpu (alloc/free).
788	 */
789	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
790		LIST_REMOVE(bucket, ub_link);
791		ZONE_UNLOCK(zone);
792		bucket_drain(zone, bucket);
793		bucket_free(zone, bucket, NULL);
794		ZONE_LOCK(zone);
795	}
796
797	/*
798	 * Shrink further bucket sizes.  The price of a single zone lock collision
799	 * is probably lower than the price of a global cache drain.
800	 */
801	if (zone->uz_count > zone->uz_count_min)
802		zone->uz_count--;
803}
804
805static void
806keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
807{
808	uint8_t *mem;
809	int i;
810	uint8_t flags;
811
812	mem = slab->us_data;
813	flags = slab->us_flags;
814	i = start;
815	if (keg->uk_fini != NULL) {
816		for (i--; i > -1; i--)
817			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
818			    keg->uk_size);
819	}
820	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
821		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
822#ifdef UMA_DEBUG
823	printf("%s: Returning %d bytes.\n", keg->uk_name,
824	    PAGE_SIZE * keg->uk_ppera);
825#endif
826	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
827}
828
829/*
830 * Frees pages from a keg back to the system.  This is done on demand from
831 * the pageout daemon.
832 *
833 * Returns nothing.
834 */
835static void
836keg_drain(uma_keg_t keg)
837{
838	struct slabhead freeslabs = { 0 };
839	uma_slab_t slab;
840	uma_slab_t n;
841
842	/*
843	 * We don't want to take pages from statically allocated kegs at this
844	 * time
845	 */
846	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
847		return;
848
849#ifdef UMA_DEBUG
850	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
851#endif
852	KEG_LOCK(keg);
853	if (keg->uk_free == 0)
854		goto finished;
855
856	slab = LIST_FIRST(&keg->uk_free_slab);
857	while (slab) {
858		n = LIST_NEXT(slab, us_link);
859
860		/* We have nowhere to free these to */
861		if (slab->us_flags & UMA_SLAB_BOOT) {
862			slab = n;
863			continue;
864		}
865
866		LIST_REMOVE(slab, us_link);
867		keg->uk_pages -= keg->uk_ppera;
868		keg->uk_free -= keg->uk_ipers;
869
870		if (keg->uk_flags & UMA_ZONE_HASH)
871			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
872
873		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
874
875		slab = n;
876	}
877finished:
878	KEG_UNLOCK(keg);
879
880	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
881		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
882		keg_free_slab(keg, slab, keg->uk_ipers);
883	}
884}
885
886static void
887zone_drain_wait(uma_zone_t zone, int waitok)
888{
889
890	/*
891	 * Set draining to interlock with zone_dtor() so we can release our
892	 * locks as we go.  Only dtor() should do a WAITOK call since it
893	 * is the only call that knows the structure will still be available
894	 * when it wakes up.
895	 */
896	ZONE_LOCK(zone);
897	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
898		if (waitok == M_NOWAIT)
899			goto out;
900		mtx_unlock(&uma_mtx);
901		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
902		mtx_lock(&uma_mtx);
903	}
904	zone->uz_flags |= UMA_ZFLAG_DRAINING;
905	bucket_cache_drain(zone);
906	ZONE_UNLOCK(zone);
907	/*
908	 * The DRAINING flag protects us from being freed while
909	 * we're running.  Normally the uma_mtx would protect us but we
910	 * must be able to release and acquire the right lock for each keg.
911	 */
912	zone_foreach_keg(zone, &keg_drain);
913	ZONE_LOCK(zone);
914	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
915	wakeup(zone);
916out:
917	ZONE_UNLOCK(zone);
918}
919
920void
921zone_drain(uma_zone_t zone)
922{
923
924	zone_drain_wait(zone, M_NOWAIT);
925}
926
927/*
928 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
929 *
930 * Arguments:
931 *	wait  Shall we wait?
932 *
933 * Returns:
934 *	The slab that was allocated or NULL if there is no memory and the
935 *	caller specified M_NOWAIT.
936 */
937static uma_slab_t
938keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
939{
940	uma_slabrefcnt_t slabref;
941	uma_alloc allocf;
942	uma_slab_t slab;
943	uint8_t *mem;
944	uint8_t flags;
945	int i;
946
947	mtx_assert(&keg->uk_lock, MA_OWNED);
948	slab = NULL;
949	mem = NULL;
950
951#ifdef UMA_DEBUG
952	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
953#endif
954	allocf = keg->uk_allocf;
955	KEG_UNLOCK(keg);
956
957	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
958		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
959		if (slab == NULL)
960			goto out;
961	}
962
963	/*
964	 * This reproduces the old vm_zone behavior of zero filling pages the
965	 * first time they are added to a zone.
966	 *
967	 * Malloced items are zeroed in uma_zalloc.
968	 */
969
970	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
971		wait |= M_ZERO;
972	else
973		wait &= ~M_ZERO;
974
975	if (keg->uk_flags & UMA_ZONE_NODUMP)
976		wait |= M_NODUMP;
977
978	/* zone is passed for legacy reasons. */
979	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
980	if (mem == NULL) {
981		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
982			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
983		slab = NULL;
984		goto out;
985	}
986
987	/* Point the slab into the allocated memory */
988	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
989		slab = (uma_slab_t )(mem + keg->uk_pgoff);
990
991	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
992		for (i = 0; i < keg->uk_ppera; i++)
993			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
994
995	slab->us_keg = keg;
996	slab->us_data = mem;
997	slab->us_freecount = keg->uk_ipers;
998	slab->us_flags = flags;
999	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1000#ifdef INVARIANTS
1001	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1002#endif
1003	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1004		slabref = (uma_slabrefcnt_t)slab;
1005		for (i = 0; i < keg->uk_ipers; i++)
1006			slabref->us_refcnt[i] = 0;
1007	}
1008
1009	if (keg->uk_init != NULL) {
1010		for (i = 0; i < keg->uk_ipers; i++)
1011			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1012			    keg->uk_size, wait) != 0)
1013				break;
1014		if (i != keg->uk_ipers) {
1015			keg_free_slab(keg, slab, i);
1016			slab = NULL;
1017			goto out;
1018		}
1019	}
1020out:
1021	KEG_LOCK(keg);
1022
1023	if (slab != NULL) {
1024		if (keg->uk_flags & UMA_ZONE_HASH)
1025			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1026
1027		keg->uk_pages += keg->uk_ppera;
1028		keg->uk_free += keg->uk_ipers;
1029	}
1030
1031	return (slab);
1032}
1033
1034/*
1035 * This function is intended to be used early on in place of page_alloc() so
1036 * that we may use the boot time page cache to satisfy allocations before
1037 * the VM is ready.
1038 */
1039static void *
1040startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
1041{
1042	uma_keg_t keg;
1043	uma_slab_t tmps;
1044	int pages, check_pages;
1045
1046	keg = zone_first_keg(zone);
1047	pages = howmany(bytes, PAGE_SIZE);
1048	check_pages = pages - 1;
1049	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1050
1051	/*
1052	 * Check our small startup cache to see if it has pages remaining.
1053	 */
1054	mtx_lock(&uma_boot_pages_mtx);
1055
1056	/* First check if we have enough room. */
1057	tmps = LIST_FIRST(&uma_boot_pages);
1058	while (tmps != NULL && check_pages-- > 0)
1059		tmps = LIST_NEXT(tmps, us_link);
1060	if (tmps != NULL) {
1061		/*
1062		 * It's ok to lose tmps references.  The last one will
1063		 * have tmps->us_data pointing to the start address of
1064		 * "pages" contiguous pages of memory.
1065		 */
1066		while (pages-- > 0) {
1067			tmps = LIST_FIRST(&uma_boot_pages);
1068			LIST_REMOVE(tmps, us_link);
1069		}
1070		mtx_unlock(&uma_boot_pages_mtx);
1071		*pflag = tmps->us_flags;
1072		return (tmps->us_data);
1073	}
1074	mtx_unlock(&uma_boot_pages_mtx);
1075	if (booted < UMA_STARTUP2)
1076		panic("UMA: Increase vm.boot_pages");
1077	/*
1078	 * Now that we've booted reset these users to their real allocator.
1079	 */
1080#ifdef UMA_MD_SMALL_ALLOC
1081	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1082#else
1083	keg->uk_allocf = page_alloc;
1084#endif
1085	return keg->uk_allocf(zone, bytes, pflag, wait);
1086}
1087
1088/*
1089 * Allocates a number of pages from the system
1090 *
1091 * Arguments:
1092 *	bytes  The number of bytes requested
1093 *	wait  Shall we wait?
1094 *
1095 * Returns:
1096 *	A pointer to the alloced memory or possibly
1097 *	NULL if M_NOWAIT is set.
1098 */
1099static void *
1100page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
1101{
1102	void *p;	/* Returned page */
1103
1104	*pflag = UMA_SLAB_KMEM;
1105	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1106
1107	return (p);
1108}
1109
1110/*
1111 * Allocates a number of pages from within an object
1112 *
1113 * Arguments:
1114 *	bytes  The number of bytes requested
1115 *	wait   Shall we wait?
1116 *
1117 * Returns:
1118 *	A pointer to the alloced memory or possibly
1119 *	NULL if M_NOWAIT is set.
1120 */
1121static void *
1122noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
1123{
1124	TAILQ_HEAD(, vm_page) alloctail;
1125	u_long npages;
1126	vm_offset_t retkva, zkva;
1127	vm_page_t p, p_next;
1128	uma_keg_t keg;
1129
1130	TAILQ_INIT(&alloctail);
1131	keg = zone_first_keg(zone);
1132
1133	npages = howmany(bytes, PAGE_SIZE);
1134	while (npages > 0) {
1135		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1136		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1137		if (p != NULL) {
1138			/*
1139			 * Since the page does not belong to an object, its
1140			 * listq is unused.
1141			 */
1142			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1143			npages--;
1144			continue;
1145		}
1146		if (wait & M_WAITOK) {
1147			VM_WAIT;
1148			continue;
1149		}
1150
1151		/*
1152		 * Page allocation failed, free intermediate pages and
1153		 * exit.
1154		 */
1155		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1156			vm_page_unwire(p, 0);
1157			vm_page_free(p);
1158		}
1159		return (NULL);
1160	}
1161	*flags = UMA_SLAB_PRIV;
1162	zkva = keg->uk_kva +
1163	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1164	retkva = zkva;
1165	TAILQ_FOREACH(p, &alloctail, listq) {
1166		pmap_qenter(zkva, &p, 1);
1167		zkva += PAGE_SIZE;
1168	}
1169
1170	return ((void *)retkva);
1171}
1172
1173/*
1174 * Frees a number of pages to the system
1175 *
1176 * Arguments:
1177 *	mem   A pointer to the memory to be freed
1178 *	size  The size of the memory being freed
1179 *	flags The original p->us_flags field
1180 *
1181 * Returns:
1182 *	Nothing
1183 */
1184static void
1185page_free(void *mem, int size, uint8_t flags)
1186{
1187	struct vmem *vmem;
1188
1189	if (flags & UMA_SLAB_KMEM)
1190		vmem = kmem_arena;
1191	else if (flags & UMA_SLAB_KERNEL)
1192		vmem = kernel_arena;
1193	else
1194		panic("UMA: page_free used with invalid flags %d", flags);
1195
1196	kmem_free(vmem, (vm_offset_t)mem, size);
1197}
1198
1199/*
1200 * Zero fill initializer
1201 *
1202 * Arguments/Returns follow uma_init specifications
1203 */
1204static int
1205zero_init(void *mem, int size, int flags)
1206{
1207	bzero(mem, size);
1208	return (0);
1209}
1210
1211/*
1212 * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1213 *
1214 * Arguments
1215 *	keg  The zone we should initialize
1216 *
1217 * Returns
1218 *	Nothing
1219 */
1220static void
1221keg_small_init(uma_keg_t keg)
1222{
1223	u_int rsize;
1224	u_int memused;
1225	u_int wastedspace;
1226	u_int shsize;
1227
1228	if (keg->uk_flags & UMA_ZONE_PCPU) {
1229		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1230
1231		keg->uk_slabsize = sizeof(struct pcpu);
1232		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1233		    PAGE_SIZE);
1234	} else {
1235		keg->uk_slabsize = UMA_SLAB_SIZE;
1236		keg->uk_ppera = 1;
1237	}
1238
1239	/*
1240	 * Calculate the size of each allocation (rsize) according to
1241	 * alignment.  If the requested size is smaller than we have
1242	 * allocation bits for, we round it up.
1243	 */
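	/*
	 * Illustrative values: with uk_align == 15 (16-byte alignment), a
	 * 20-byte item is rounded to rsize = (20 & ~15) + 16 = 32, so each
	 * item starts on a 16-byte boundary within the slab.
	 */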
1244	rsize = keg->uk_size;
1245	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
1246		rsize = keg->uk_slabsize / SLAB_SETSIZE;
1247	if (rsize & keg->uk_align)
1248		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1249	keg->uk_rsize = rsize;
1250
1251	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1252	    keg->uk_rsize < sizeof(struct pcpu),
1253	    ("%s: size %u too large", __func__, keg->uk_rsize));
1254
1255	if (keg->uk_flags & UMA_ZONE_REFCNT)
1256		rsize += sizeof(uint32_t);
1257
1258	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1259		shsize = 0;
1260	else
1261		shsize = sizeof(struct uma_slab);
1262
1263	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
1264	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1265	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1266
1267	memused = keg->uk_ipers * rsize + shsize;
1268	wastedspace = keg->uk_slabsize - memused;
1269
1270	/*
1271	 * We can't do OFFPAGE if we're internal or if we've been
1272	 * asked to not go to the VM for buckets.  If we do this we
1273	 * may end up going to the VM for slabs, which we do not
1274	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1275	 * of UMA_ZONE_VM, which clearly forbids it.
1276	 */
1277	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1278	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1279		return;
1280
1281	/*
1282	 * See if using an OFFPAGE slab will limit our waste.  Only do
1283	 * this if it permits more items per-slab.
1284	 *
1285	 * XXX We could try growing slabsize to limit max waste as well.
1286	 * Historically this was not done because the VM could not
1287	 * efficiently handle contiguous allocations.
1288	 */
1289	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
1290	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
1291		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
1292		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1293		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1294#ifdef UMA_DEBUG
1295		printf("UMA decided we need offpage slab headers for "
1296		    "keg: %s, calculated wastedspace = %d, "
1297		    "maximum wasted space allowed = %d, "
1298		    "calculated ipers = %d, "
1299		    "new wasted space = %d\n", keg->uk_name, wastedspace,
1300		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1301		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
1302#endif
1303		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1304	}
1305
1306	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1307	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1308		keg->uk_flags |= UMA_ZONE_HASH;
1309}
1310
1311/*
1312 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1313 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1314 * more complicated.
1315 *
1316 * Arguments
1317 *	keg  The keg we should initialize
1318 *
1319 * Returns
1320 *	Nothing
1321 */
1322static void
1323keg_large_init(uma_keg_t keg)
1324{
1325	u_int shsize;
1326
1327	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1328	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1329	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1330	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1331	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1332
1333	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1334	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
1335	keg->uk_ipers = 1;
1336	keg->uk_rsize = keg->uk_size;
1337
1338	/* We can't do OFFPAGE if we're internal, bail out here. */
1339	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1340		return;
1341
1342	/* Check whether we have enough space to not do OFFPAGE. */
1343	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1344		shsize = sizeof(struct uma_slab);
1345		if (keg->uk_flags & UMA_ZONE_REFCNT)
1346			shsize += keg->uk_ipers * sizeof(uint32_t);
1347		if (shsize & UMA_ALIGN_PTR)
1348			shsize = (shsize & ~UMA_ALIGN_PTR) +
1349			    (UMA_ALIGN_PTR + 1);
1350
1351		if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
1352			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1353	}
1354
1355	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1356	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1357		keg->uk_flags |= UMA_ZONE_HASH;
1358}
1359
1360static void
1361keg_cachespread_init(uma_keg_t keg)
1362{
1363	int alignsize;
1364	int trailer;
1365	int pages;
1366	int rsize;
1367
1368	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1369	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1370
1371	alignsize = keg->uk_align + 1;
1372	rsize = keg->uk_size;
1373	/*
1374	 * We want one item to start on every align boundary in a page.  To
1375	 * do this we will span pages.  We will also extend the item by the
1376	 * size of align if it is an even multiple of align.  Otherwise, it
1377	 * would fall on the same boundary every time.
1378	 */
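	/*
	 * Illustrative example (assuming 4 KB pages): a 104-byte item with
	 * 64-byte alignment is rounded up to 128 and then bumped to 192, so
	 * successive items start at different offsets within the page; the
	 * slab then spans 3 pages and holds 64 items.
	 */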
1379	if (rsize & keg->uk_align)
1380		rsize = (rsize & ~keg->uk_align) + alignsize;
1381	if ((rsize & alignsize) == 0)
1382		rsize += alignsize;
1383	trailer = rsize - keg->uk_size;
1384	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1385	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1386	keg->uk_rsize = rsize;
1387	keg->uk_ppera = pages;
1388	keg->uk_slabsize = UMA_SLAB_SIZE;
1389	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1390	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1391	KASSERT(keg->uk_ipers <= uma_max_ipers,
1392	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1393	    keg->uk_ipers));
1394}
1395
1396/*
1397 * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1398 * the keg onto the global keg list.
1399 *
1400 * Arguments/Returns follow uma_ctor specifications
1401 *	udata  Actually uma_kctor_args
1402 */
1403static int
1404keg_ctor(void *mem, int size, void *udata, int flags)
1405{
1406	struct uma_kctor_args *arg = udata;
1407	uma_keg_t keg = mem;
1408	uma_zone_t zone;
1409
1410	bzero(keg, size);
1411	keg->uk_size = arg->size;
1412	keg->uk_init = arg->uminit;
1413	keg->uk_fini = arg->fini;
1414	keg->uk_align = arg->align;
1415	keg->uk_free = 0;
1416	keg->uk_reserve = 0;
1417	keg->uk_pages = 0;
1418	keg->uk_flags = arg->flags;
1419	keg->uk_allocf = page_alloc;
1420	keg->uk_freef = page_free;
1421	keg->uk_slabzone = NULL;
1422
1423	/*
1424	 * The master zone is passed to us at keg-creation time.
1425	 */
1426	zone = arg->zone;
1427	keg->uk_name = zone->uz_name;
1428
1429	if (arg->flags & UMA_ZONE_VM)
1430		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1431
1432	if (arg->flags & UMA_ZONE_ZINIT)
1433		keg->uk_init = zero_init;
1434
1435	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1436		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1437
1438	if (arg->flags & UMA_ZONE_PCPU)
1439#ifdef SMP
1440		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1441#else
1442		keg->uk_flags &= ~UMA_ZONE_PCPU;
1443#endif
1444
1445	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1446		keg_cachespread_init(keg);
1447	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
1448		if (keg->uk_size >
1449		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
1450		    sizeof(uint32_t)))
1451			keg_large_init(keg);
1452		else
1453			keg_small_init(keg);
1454	} else {
1455		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1456			keg_large_init(keg);
1457		else
1458			keg_small_init(keg);
1459	}
1460
1461	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1462		if (keg->uk_flags & UMA_ZONE_REFCNT) {
1463			if (keg->uk_ipers > uma_max_ipers_ref)
1464				panic("Too many ref items per zone: %d > %d\n",
1465				    keg->uk_ipers, uma_max_ipers_ref);
1466			keg->uk_slabzone = slabrefzone;
1467		} else
1468			keg->uk_slabzone = slabzone;
1469	}
1470
1471	/*
1472	 * If we haven't booted yet we need allocations to go through the
1473	 * startup cache until the vm is ready.
1474	 */
1475	if (keg->uk_ppera == 1) {
1476#ifdef UMA_MD_SMALL_ALLOC
1477		keg->uk_allocf = uma_small_alloc;
1478		keg->uk_freef = uma_small_free;
1479
1480		if (booted < UMA_STARTUP)
1481			keg->uk_allocf = startup_alloc;
1482#else
1483		if (booted < UMA_STARTUP2)
1484			keg->uk_allocf = startup_alloc;
1485#endif
1486	} else if (booted < UMA_STARTUP2 &&
1487	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1488		keg->uk_allocf = startup_alloc;
1489
1490	/*
1491	 * Initialize keg's lock
1492	 */
1493	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1494
1495	/*
1496	 * If we're putting the slab header in the actual page we need to
1497	 * figure out where in each page it goes.  This calculates a right
1498	 * justified offset into the memory on an ALIGN_PTR boundary.
1499	 */
1500	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1501		u_int totsize;
1502
1503		/* Size of the slab struct and free list */
1504		totsize = sizeof(struct uma_slab);
1505
1506		/* Size of the reference counts. */
1507		if (keg->uk_flags & UMA_ZONE_REFCNT)
1508			totsize += keg->uk_ipers * sizeof(uint32_t);
1509
1510		if (totsize & UMA_ALIGN_PTR)
1511			totsize = (totsize & ~UMA_ALIGN_PTR) +
1512			    (UMA_ALIGN_PTR + 1);
1513		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1514
1515		/*
1516		 * The only way the following is possible is if with our
1517		 * UMA_ALIGN_PTR adjustments we are now bigger than
1518		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1519		 * mathematically possible for all cases, so we make
1520		 * sure here anyway.
1521		 */
1522		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1523		if (keg->uk_flags & UMA_ZONE_REFCNT)
1524			totsize += keg->uk_ipers * sizeof(uint32_t);
1525		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1526			printf("zone %s ipers %d rsize %d size %d\n",
1527			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1528			    keg->uk_size);
1529			panic("UMA slab won't fit.");
1530		}
1531	}
1532
1533	if (keg->uk_flags & UMA_ZONE_HASH)
1534		hash_alloc(&keg->uk_hash);
1535
1536#ifdef UMA_DEBUG
1537	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
1538	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1539	    keg->uk_ipers, keg->uk_ppera,
1540	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1541#endif
1542
1543	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1544
1545	mtx_lock(&uma_mtx);
1546	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1547	mtx_unlock(&uma_mtx);
1548	return (0);
1549}
1550
1551/*
1552 * Zone header ctor.  This initializes all fields, locks, etc.
1553 *
1554 * Arguments/Returns follow uma_ctor specifications
1555 *	udata  Actually uma_zctor_args
1556 */
1557static int
1558zone_ctor(void *mem, int size, void *udata, int flags)
1559{
1560	struct uma_zctor_args *arg = udata;
1561	uma_zone_t zone = mem;
1562	uma_zone_t z;
1563	uma_keg_t keg;
1564
1565	bzero(zone, size);
1566	zone->uz_name = arg->name;
1567	zone->uz_ctor = arg->ctor;
1568	zone->uz_dtor = arg->dtor;
1569	zone->uz_slab = zone_fetch_slab;
1570	zone->uz_init = NULL;
1571	zone->uz_fini = NULL;
1572	zone->uz_allocs = 0;
1573	zone->uz_frees = 0;
1574	zone->uz_fails = 0;
1575	zone->uz_sleeps = 0;
1576	zone->uz_count = 0;
1577	zone->uz_count_min = 0;
1578	zone->uz_flags = 0;
1579	zone->uz_warning = NULL;
1580	timevalclear(&zone->uz_ratecheck);
1581	keg = arg->keg;
1582
1583	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1584
1585	/*
1586	 * This is a pure cache zone, no kegs.
1587	 */
1588	if (arg->import) {
1589		if (arg->flags & UMA_ZONE_VM)
1590			arg->flags |= UMA_ZFLAG_CACHEONLY;
1591		zone->uz_flags = arg->flags;
1592		zone->uz_size = arg->size;
1593		zone->uz_import = arg->import;
1594		zone->uz_release = arg->release;
1595		zone->uz_arg = arg->arg;
1596		zone->uz_lockptr = &zone->uz_lock;
1597		mtx_lock(&uma_mtx);
1598		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1599		mtx_unlock(&uma_mtx);
1600		goto out;
1601	}
1602
1603	/*
1604	 * Use the regular zone/keg/slab allocator.
1605	 */
1606	zone->uz_import = (uma_import)zone_import;
1607	zone->uz_release = (uma_release)zone_release;
1608	zone->uz_arg = zone;
1609
1610	if (arg->flags & UMA_ZONE_SECONDARY) {
1611		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1612		zone->uz_init = arg->uminit;
1613		zone->uz_fini = arg->fini;
1614		zone->uz_lockptr = &keg->uk_lock;
1615		zone->uz_flags |= UMA_ZONE_SECONDARY;
1616		mtx_lock(&uma_mtx);
1617		ZONE_LOCK(zone);
1618		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1619			if (LIST_NEXT(z, uz_link) == NULL) {
1620				LIST_INSERT_AFTER(z, zone, uz_link);
1621				break;
1622			}
1623		}
1624		ZONE_UNLOCK(zone);
1625		mtx_unlock(&uma_mtx);
1626	} else if (keg == NULL) {
1627		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1628		    arg->align, arg->flags)) == NULL)
1629			return (ENOMEM);
1630	} else {
1631		struct uma_kctor_args karg;
1632		int error;
1633
1634		/* We should only be here from uma_startup() */
1635		karg.size = arg->size;
1636		karg.uminit = arg->uminit;
1637		karg.fini = arg->fini;
1638		karg.align = arg->align;
1639		karg.flags = arg->flags;
1640		karg.zone = zone;
1641		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1642		    flags);
1643		if (error)
1644			return (error);
1645	}
1646
1647	/*
1648	 * Link in the first keg.
1649	 */
1650	zone->uz_klink.kl_keg = keg;
1651	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1652	zone->uz_lockptr = &keg->uk_lock;
1653	zone->uz_size = keg->uk_size;
1654	zone->uz_flags |= (keg->uk_flags &
1655	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1656
1657	/*
1658	 * Some internal zones don't have room allocated for the per cpu
1659	 * caches.  If we're internal, bail out here.
1660	 */
1661	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1662		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1663		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1664		return (0);
1665	}
1666
1667out:
1668	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1669		zone->uz_count = bucket_select(zone->uz_size);
1670	else
1671		zone->uz_count = BUCKET_MAX;
1672	zone->uz_count_min = zone->uz_count;
1673
1674	return (0);
1675}
1676
1677/*
1678 * Keg header dtor.  This frees all data, destroys locks, frees the hash
1679 * table and removes the keg from the global list.
1680 *
1681 * Arguments/Returns follow uma_dtor specifications
1682 *	udata  unused
1683 */
1684static void
1685keg_dtor(void *arg, int size, void *udata)
1686{
1687	uma_keg_t keg;
1688
1689	keg = (uma_keg_t)arg;
1690	KEG_LOCK(keg);
1691	if (keg->uk_free != 0) {
1692		printf("Freed UMA keg (%s) was not empty (%d items). "
1693		    " Lost %d pages of memory.\n",
1694		    keg->uk_name ? keg->uk_name : "",
1695		    keg->uk_free, keg->uk_pages);
1696	}
1697	KEG_UNLOCK(keg);
1698
1699	hash_free(&keg->uk_hash);
1700
1701	KEG_LOCK_FINI(keg);
1702}
1703
1704/*
1705 * Zone header dtor.
1706 *
1707 * Arguments/Returns follow uma_dtor specifications
1708 *	udata  unused
1709 */
1710static void
1711zone_dtor(void *arg, int size, void *udata)
1712{
1713	uma_klink_t klink;
1714	uma_zone_t zone;
1715	uma_keg_t keg;
1716
1717	zone = (uma_zone_t)arg;
1718	keg = zone_first_keg(zone);
1719
1720	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1721		cache_drain(zone);
1722
1723	mtx_lock(&uma_mtx);
1724	LIST_REMOVE(zone, uz_link);
1725	mtx_unlock(&uma_mtx);
1726	/*
1727	 * XXX there are some races here where
1728	 * the zone can be drained but zone lock
1729	 * released and then refilled before we
1730	 * remove it... we don't care for now
1731	 */
1732	zone_drain_wait(zone, M_WAITOK);
1733	/*
1734	 * Unlink all of our kegs.
1735	 */
1736	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1737		klink->kl_keg = NULL;
1738		LIST_REMOVE(klink, kl_link);
1739		if (klink == &zone->uz_klink)
1740			continue;
1741		free(klink, M_TEMP);
1742	}
1743	/*
1744	 * We only destroy kegs from non secondary zones.
1745	 */
1746	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1747		mtx_lock(&uma_mtx);
1748		LIST_REMOVE(keg, uk_link);
1749		mtx_unlock(&uma_mtx);
1750		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1751	}
1752	ZONE_LOCK_FINI(zone);
1753}
1754
1755/*
1756 * Traverses every zone in the system and calls a callback
1757 *
1758 * Arguments:
1759 *	zfunc  A pointer to a function which accepts a zone
1760 *		as an argument.
1761 *
1762 * Returns:
1763 *	Nothing
1764 */
1765static void
1766zone_foreach(void (*zfunc)(uma_zone_t))
1767{
1768	uma_keg_t keg;
1769	uma_zone_t zone;
1770
1771	mtx_lock(&uma_mtx);
1772	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1773		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1774			zfunc(zone);
1775	}
1776	mtx_unlock(&uma_mtx);
1777}
1778
1779/* Public functions */
1780/* See uma.h */
1781void
1782uma_startup(void *bootmem, int boot_pages)
1783{
1784	struct uma_zctor_args args;
1785	uma_slab_t slab;
1786	u_int slabsize;
1787	int i;
1788
1789#ifdef UMA_DEBUG
1790	printf("Creating uma keg headers zone and keg.\n");
1791#endif
1792	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1793
1794	/* "manually" create the initial zone */
1795	memset(&args, 0, sizeof(args));
1796	args.name = "UMA Kegs";
1797	args.size = sizeof(struct uma_keg);
1798	args.ctor = keg_ctor;
1799	args.dtor = keg_dtor;
1800	args.uminit = zero_init;
1801	args.fini = NULL;
1802	args.keg = &masterkeg;
1803	args.align = 32 - 1;
1804	args.flags = UMA_ZFLAG_INTERNAL;
1805	/* The initial zone has no per-CPU queues so it's smaller */
1806	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1807
1808#ifdef UMA_DEBUG
1809	printf("Filling boot free list.\n");
1810#endif
1811	for (i = 0; i < boot_pages; i++) {
1812		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
1813		slab->us_data = (uint8_t *)slab;
1814		slab->us_flags = UMA_SLAB_BOOT;
1815		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1816	}
1817	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1818
1819#ifdef UMA_DEBUG
1820	printf("Creating uma zone headers zone and keg.\n");
1821#endif
1822	args.name = "UMA Zones";
1823	args.size = sizeof(struct uma_zone) +
1824	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1825	args.ctor = zone_ctor;
1826	args.dtor = zone_dtor;
1827	args.uminit = zero_init;
1828	args.fini = NULL;
1829	args.keg = NULL;
1830	args.align = 32 - 1;
1831	args.flags = UMA_ZFLAG_INTERNAL;
1832	/* The initial zone has no per-CPU queues so it's smaller */
1833	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1834
1835#ifdef UMA_DEBUG
1836	printf("Initializing pcpu cache locks.\n");
1837#endif
1838#ifdef UMA_DEBUG
1839	printf("Creating slab and hash zones.\n");
1840#endif
1841
1842	/* Now make a zone for slab headers */
1843	slabzone = uma_zcreate("UMA Slabs",
1844				sizeof(struct uma_slab),
1845				NULL, NULL, NULL, NULL,
1846				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1847
1848	/*
1849	 * We also create a zone for the bigger slabs with reference
1850	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1851	 */
1852	slabsize = sizeof(struct uma_slab_refcnt);
1853	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
1854	slabrefzone = uma_zcreate("UMA RCntSlabs",
1855				  slabsize,
1856				  NULL, NULL, NULL, NULL,
1857				  UMA_ALIGN_PTR,
1858				  UMA_ZFLAG_INTERNAL);
1859
1860	hashzone = uma_zcreate("UMA Hash",
1861	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1862	    NULL, NULL, NULL, NULL,
1863	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1864
1865	bucket_init();
1866
1867	booted = UMA_STARTUP;
1868
1869#ifdef UMA_DEBUG
1870	printf("UMA startup complete.\n");
1871#endif
1872}
1873
1874/* see uma.h */
1875void
1876uma_startup2(void)
1877{
1878	booted = UMA_STARTUP2;
1879	bucket_enable();
1880#ifdef UMA_DEBUG
1881	printf("UMA startup2 complete.\n");
1882#endif
1883}
1884
1885/*
1886 * Initialize our callout handle
1887 *
1888 */
1889
1890static void
1891uma_startup3(void)
1892{
1893#ifdef UMA_DEBUG
1894	printf("Starting callout.\n");
1895#endif
1896	callout_init(&uma_callout, CALLOUT_MPSAFE);
1897	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1898#ifdef UMA_DEBUG
1899	printf("UMA startup3 complete.\n");
1900#endif
1901}
1902
1903static uma_keg_t
1904uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1905		int align, uint32_t flags)
1906{
1907	struct uma_kctor_args args;
1908
1909	args.size = size;
1910	args.uminit = uminit;
1911	args.fini = fini;
1912	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1913	args.flags = flags;
1914	args.zone = zone;
1915	return (zone_alloc_item(kegs, &args, M_WAITOK));
1916}
1917
1918/* See uma.h */
1919void
1920uma_set_align(int align)
1921{
1922
1923	if (align != UMA_ALIGN_CACHE)
1924		uma_align_cache = align;
1925}
1926
1927/* See uma.h */
1928uma_zone_t
1929uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1930		uma_init uminit, uma_fini fini, int align, uint32_t flags)
1931
1932{
1933	struct uma_zctor_args args;
1934
1935	/* This stuff is essential for the zone ctor */
1936	memset(&args, 0, sizeof(args));
1937	args.name = name;
1938	args.size = size;
1939	args.ctor = ctor;
1940	args.dtor = dtor;
1941	args.uminit = uminit;
1942	args.fini = fini;
1943	args.align = align;
1944	args.flags = flags;
1945	args.keg = NULL;
1946
1947	return (zone_alloc_item(zones, &args, M_WAITOK));
1948}
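/*
 * Illustrative usage sketch (editorial; the zone and structure names are
 * hypothetical):
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, p);
 */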
1949
1950/* See uma.h */
1951uma_zone_t
1952uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1953		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1954{
1955	struct uma_zctor_args args;
1956	uma_keg_t keg;
1957
1958	keg = zone_first_keg(master);
1959	memset(&args, 0, sizeof(args));
1960	args.name = name;
1961	args.size = keg->uk_size;
1962	args.ctor = ctor;
1963	args.dtor = dtor;
1964	args.uminit = zinit;
1965	args.fini = zfini;
1966	args.align = keg->uk_align;
1967	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1968	args.keg = keg;
1969
1970	/* XXX Attaches only one keg of potentially many. */
1971	return (zone_alloc_item(zones, &args, M_WAITOK));
1972}
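/*
 * A secondary zone shares its master's keg, and therefore its item size and
 * backing memory, but can layer its own ctor/dtor/zinit/zfini on top of it.
 * The mbuf "packet" zone in kern_mbuf.c, for instance, is built this way on
 * top of the mbuf zone's keg.
 */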
1973
1974/* See uma.h */
1975uma_zone_t
1976uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1977		    uma_init zinit, uma_fini zfini, uma_import zimport,
1978		    uma_release zrelease, void *arg, int flags)
1979{
1980	struct uma_zctor_args args;
1981
1982	memset(&args, 0, sizeof(args));
1983	args.name = name;
1984	args.size = size;
1985	args.ctor = ctor;
1986	args.dtor = dtor;
1987	args.uminit = zinit;
1988	args.fini = zfini;
1989	args.import = zimport;
1990	args.release = zrelease;
1991	args.arg = arg;
1992	args.align = 0;
1993	args.flags = flags;
1994
1995	return (zone_alloc_item(zones, &args, M_WAITOK));
1996}
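/*
 * Zones created here have no keg at all; items are obtained and released
 * through the caller-supplied zimport/zrelease functions, and UMA supplies
 * only the per-CPU bucket caching layer on top of them.
 */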
1997
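/*
 * Lock two zones in a consistent (address) order so that concurrent callers
 * locking the same pair cannot deadlock against one another.
 */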
1998static void
1999zone_lock_pair(uma_zone_t a, uma_zone_t b)
2000{
2001	if (a < b) {
2002		ZONE_LOCK(a);
2003		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2004	} else {
2005		ZONE_LOCK(b);
2006		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2007	}
2008}
2009
2010static void
2011zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2012{
2013
2014	ZONE_UNLOCK(a);
2015	ZONE_UNLOCK(b);
2016}
2017
2018int
2019uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2020{
2021	uma_klink_t klink;
2022	uma_klink_t kl;
2023	int error;
2024
2025	error = 0;
2026	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2027
2028	zone_lock_pair(zone, master);
2029	/*
2030	 * zone must use vtoslab() to resolve objects and must already be
2031	 * a secondary.
2032	 */
2033	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2034	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2035		error = EINVAL;
2036		goto out;
2037	}
2038	/*
2039	 * The new master must also use vtoslab().
2040	 */
2041	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2042		error = EINVAL;
2043		goto out;
2044	}
2045	/*
2046	 * Both must either be refcnt, or not be refcnt.
2047	 */
2048	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
2049	    (master->uz_flags & UMA_ZONE_REFCNT)) {
2050		error = EINVAL;
2051		goto out;
2052	}
2053	/*
2054	 * The underlying object must be the same size.  rsize
2055	 * may be different.
2056	 */
2057	if (master->uz_size != zone->uz_size) {
2058		error = E2BIG;
2059		goto out;
2060	}
2061	/*
2062	 * Put it at the end of the list.
2063	 */
2064	klink->kl_keg = zone_first_keg(master);
2065	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2066		if (LIST_NEXT(kl, kl_link) == NULL) {
2067			LIST_INSERT_AFTER(kl, klink, kl_link);
2068			break;
2069		}
2070	}
2071	klink = NULL;
2072	zone->uz_flags |= UMA_ZFLAG_MULTI;
2073	zone->uz_slab = zone_fetch_slab_multi;
2074
2075out:
2076	zone_unlock_pair(zone, master);
2077	if (klink != NULL)
2078		free(klink, M_TEMP);
2079
2080	return (error);
2081}
2082
2083
2084/* See uma.h */
2085void
2086uma_zdestroy(uma_zone_t zone)
2087{
2088
2089	zone_free_item(zones, zone, NULL, SKIP_NONE);
2090}
2091
2092/* See uma.h */
2093void *
2094uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2095{
2096	void *item;
2097	uma_cache_t cache;
2098	uma_bucket_t bucket;
2099	int lockfail;
2100	int cpu;
2101
2102	/* This is the fast path allocation */
2103#ifdef UMA_DEBUG_ALLOC_1
2104	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
2105#endif
2106	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
2107	    zone->uz_name, flags);
2108
2109	if (flags & M_WAITOK) {
2110		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2111		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2112	}
2113#ifdef DEBUG_MEMGUARD
2114	if (memguard_cmp_zone(zone)) {
2115		item = memguard_alloc(zone->uz_size, flags);
2116		if (item != NULL) {
2117			/*
2118			 * Avoid conflict with the use-after-free
2119			 * protecting infrastructure from INVARIANTS.
2120			 */
2121			if (zone->uz_init != NULL &&
2122			    zone->uz_init != mtrash_init &&
2123			    zone->uz_init(item, zone->uz_size, flags) != 0)
2124				return (NULL);
2125			if (zone->uz_ctor != NULL &&
2126			    zone->uz_ctor != mtrash_ctor &&
2127			    zone->uz_ctor(item, zone->uz_size, udata,
2128			    flags) != 0) {
2129			    	zone->uz_fini(item, zone->uz_size);
2130				return (NULL);
2131			}
2132			return (item);
2133		}
2134		/* This is unfortunate but should not be fatal. */
2135	}
2136#endif
2137	/*
2138	 * If possible, allocate from the per-CPU cache.  There are two
2139	 * requirements for safe access to the per-CPU cache: (1) the thread
2140	 * accessing the cache must not be preempted or yield during access,
2141	 * and (2) the thread must not migrate CPUs without switching which
2142	 * cache it accesses.  We rely on a critical section to prevent
2143	 * preemption and migration.  We release the critical section in
2144	 * order to acquire the zone mutex if we are unable to allocate from
2145	 * the current cache; when we re-acquire the critical section, we
2146	 * must detect and handle migration if it has occurred.
2147	 */
2148	critical_enter();
2149	cpu = curcpu;
2150	cache = &zone->uz_cpu[cpu];
2151
2152zalloc_start:
2153	bucket = cache->uc_allocbucket;
2154	if (bucket != NULL && bucket->ub_cnt > 0) {
2155		bucket->ub_cnt--;
2156		item = bucket->ub_bucket[bucket->ub_cnt];
2157#ifdef INVARIANTS
2158		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2159#endif
2160		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2161		cache->uc_allocs++;
2162		critical_exit();
2163		if (zone->uz_ctor != NULL &&
2164		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2165			atomic_add_long(&zone->uz_fails, 1);
2166			zone_free_item(zone, item, udata, SKIP_DTOR);
2167			return (NULL);
2168		}
2169#ifdef INVARIANTS
2170		uma_dbg_alloc(zone, NULL, item);
2171#endif
2172		if (flags & M_ZERO)
2173			bzero(item, zone->uz_size);
2174		return (item);
2175	}
2176
2177	/*
2178	 * We have run out of items in our alloc bucket.
2179	 * See if we can switch with our free bucket.
2180	 */
2181	bucket = cache->uc_freebucket;
2182	if (bucket != NULL && bucket->ub_cnt > 0) {
2183#ifdef UMA_DEBUG_ALLOC
2184		printf("uma_zalloc: Swapping empty with alloc.\n");
2185#endif
2186		cache->uc_freebucket = cache->uc_allocbucket;
2187		cache->uc_allocbucket = bucket;
2188		goto zalloc_start;
2189	}
2190
2191	/*
2192	 * Discard any empty allocation bucket while we hold no locks.
2193	 */
2194	bucket = cache->uc_allocbucket;
2195	cache->uc_allocbucket = NULL;
2196	critical_exit();
2197	if (bucket != NULL)
2198		bucket_free(zone, bucket, udata);
2199
2200	/* Short-circuit for zones without buckets and low memory. */
2201	if (zone->uz_count == 0 || bucketdisable)
2202		goto zalloc_item;
2203
2204	/*
2205	 * The attempt to retrieve an item from the per-CPU cache has failed, so
2206	 * we must go back to the zone.  This requires the zone lock, so we
2207	 * must drop the critical section, then re-acquire it when we go back
2208	 * to the cache.  Since the critical section is released, we may be
2209	 * preempted or migrate.  As such, make sure not to maintain any
2210	 * thread-local state specific to the cache from prior to releasing
2211	 * the critical section.
2212	 */
2213	lockfail = 0;
2214	if (ZONE_TRYLOCK(zone) == 0) {
2215		/* Record contention to size the buckets. */
2216		ZONE_LOCK(zone);
2217		lockfail = 1;
2218	}
2219	critical_enter();
2220	cpu = curcpu;
2221	cache = &zone->uz_cpu[cpu];
2222
2223	/*
2224	 * Since we have locked the zone we may as well send back our stats.
2225	 */
2226	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2227	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2228	cache->uc_allocs = 0;
2229	cache->uc_frees = 0;
2230
2231	/* See if we lost the race to fill the cache. */
2232	if (cache->uc_allocbucket != NULL) {
2233		ZONE_UNLOCK(zone);
2234		goto zalloc_start;
2235	}
2236
2237	/*
2238	 * Check the zone's cache of buckets.
2239	 */
2240	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2241		KASSERT(bucket->ub_cnt != 0,
2242		    ("uma_zalloc_arg: Returning an empty bucket."));
2243
2244		LIST_REMOVE(bucket, ub_link);
2245		cache->uc_allocbucket = bucket;
2246		ZONE_UNLOCK(zone);
2247		goto zalloc_start;
2248	}
2249	/* We are no longer associated with this CPU. */
2250	critical_exit();
2251
2252	/*
2253	 * We bump the uz count when the cache size is insufficient to
2254	 * handle the working set.
2255	 */
2256	if (lockfail && zone->uz_count < BUCKET_MAX)
2257		zone->uz_count++;
2258	ZONE_UNLOCK(zone);
2259
2260	/*
2261	 * Now let's just fill a bucket and put it on the free list.  If that
2262	 * works we'll restart the allocation from the beginning and it
2263	 * will use the just-filled bucket.
2264	 */
2265	bucket = zone_alloc_bucket(zone, udata, flags);
2266	if (bucket != NULL) {
2267		ZONE_LOCK(zone);
2268		critical_enter();
2269		cpu = curcpu;
2270		cache = &zone->uz_cpu[cpu];
2271		/*
2272		 * See if we lost the race or were migrated.  Cache the
2273		 * initialized bucket to make this less likely or claim
2274		 * the memory directly.
2275		 */
2276		if (cache->uc_allocbucket == NULL)
2277			cache->uc_allocbucket = bucket;
2278		else
2279			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2280		ZONE_UNLOCK(zone);
2281		goto zalloc_start;
2282	}
2283
2284	/*
2285	 * We may not be able to get a bucket so return an actual item.
2286	 */
2287#ifdef UMA_DEBUG
2288	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2289#endif
2290
2291zalloc_item:
2292	item = zone_alloc_item(zone, udata, flags);
2293
2294	return (item);
2295}
2296
2297static uma_slab_t
2298keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2299{
2300	uma_slab_t slab;
2301	int reserve;
2302
2303	mtx_assert(&keg->uk_lock, MA_OWNED);
2304	slab = NULL;
2305	reserve = 0;
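	/* Only M_USE_RESERVE requests may dip into the keg's reserved items. */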
2306	if ((flags & M_USE_RESERVE) == 0)
2307		reserve = keg->uk_reserve;
2308
2309	for (;;) {
2310		/*
2311		 * Find a slab with some space.  Prefer slabs that are partially
2312		 * used over those that are totally free.  This helps to reduce
2313		 * fragmentation.
2314		 */
2315		if (keg->uk_free > reserve) {
2316			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2317				slab = LIST_FIRST(&keg->uk_part_slab);
2318			} else {
2319				slab = LIST_FIRST(&keg->uk_free_slab);
2320				LIST_REMOVE(slab, us_link);
2321				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2322				    us_link);
2323			}
2324			MPASS(slab->us_keg == keg);
2325			return (slab);
2326		}
2327
2328		/*
2329		 * M_NOVM means don't ask at all!
2330		 */
2331		if (flags & M_NOVM)
2332			break;
2333
2334		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2335			keg->uk_flags |= UMA_ZFLAG_FULL;
2336			/*
2337			 * If this is not a multi-zone, set the FULL bit.
2338			 * Otherwise slab_multi() takes care of it.
2339			 */
2340			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2341				zone->uz_flags |= UMA_ZFLAG_FULL;
2342				zone_log_warning(zone);
2343			}
2344			if (flags & M_NOWAIT)
2345				break;
2346			zone->uz_sleeps++;
2347			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2348			continue;
2349		}
2350		slab = keg_alloc_slab(keg, zone, flags);
2351		/*
2352		 * If we got a slab here it's safe to mark it partially used
2353		 * and return.  We assume that the caller is going to remove
2354		 * at least one item.
2355		 */
2356		if (slab) {
2357			MPASS(slab->us_keg == keg);
2358			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2359			return (slab);
2360		}
2361		/*
2362		 * We might not have been able to get a slab but another cpu
2363		 * could have while we were unlocked.  Check again before we
2364		 * fail.
2365		 */
2366		flags |= M_NOVM;
2367	}
2368	return (slab);
2369}
2370
2371static uma_slab_t
2372zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2373{
2374	uma_slab_t slab;
2375
2376	if (keg == NULL) {
2377		keg = zone_first_keg(zone);
2378		KEG_LOCK(keg);
2379	}
2380
2381	for (;;) {
2382		slab = keg_fetch_slab(keg, zone, flags);
2383		if (slab)
2384			return (slab);
2385		if (flags & (M_NOWAIT | M_NOVM))
2386			break;
2387	}
2388	KEG_UNLOCK(keg);
2389	return (NULL);
2390}
2391
2392/*
2393 * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2394 * with the keg locked.  On a NULL return, no lock is held.
2395 *
2396 * The last pointer is used to seed the search.  It is not required.
2397 */
2398static uma_slab_t
2399zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2400{
2401	uma_klink_t klink;
2402	uma_slab_t slab;
2403	uma_keg_t keg;
2404	int flags;
2405	int empty;
2406	int full;
2407
2408	/*
2409	 * Don't wait on the first pass.  This will skip limit tests
2410	 * as well.  We don't want to block if we can find a provider
2411	 * without blocking.
2412	 */
2413	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2414	/*
2415	 * Use the last slab allocated as a hint for where to start
2416	 * the search.
2417	 */
2418	if (last != NULL) {
2419		slab = keg_fetch_slab(last, zone, flags);
2420		if (slab)
2421			return (slab);
2422		KEG_UNLOCK(last);
2423	}
2424	/*
2425	 * Loop until we have a slab incase of transient failures
2426	 * Loop until we have a slab in case of transient failures
2427	 * required but we've done it for so long now.
2428	 */
2429	for (;;) {
2430		empty = 0;
2431		full = 0;
2432		/*
2433		 * Search the available kegs for slabs.  Be careful to hold the
2434		 * correct lock while calling into the keg layer.
2435		 */
2436		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2437			keg = klink->kl_keg;
2438			KEG_LOCK(keg);
2439			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2440				slab = keg_fetch_slab(keg, zone, flags);
2441				if (slab)
2442					return (slab);
2443			}
2444			if (keg->uk_flags & UMA_ZFLAG_FULL)
2445				full++;
2446			else
2447				empty++;
2448			KEG_UNLOCK(keg);
2449		}
2450		if (rflags & (M_NOWAIT | M_NOVM))
2451			break;
2452		flags = rflags;
2453		/*
2454		 * All kegs are full.  XXX We can't atomically check all kegs
2455		 * and sleep so just sleep for a short period and retry.
2456		 */
2457		if (full && !empty) {
2458			ZONE_LOCK(zone);
2459			zone->uz_flags |= UMA_ZFLAG_FULL;
2460			zone->uz_sleeps++;
2461			zone_log_warning(zone);
2462			msleep(zone, zone->uz_lockptr, PVM,
2463			    "zonelimit", hz/100);
2464			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2465			ZONE_UNLOCK(zone);
2466			continue;
2467		}
2468	}
2469	return (NULL);
2470}
2471
2472static void *
2473slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2474{
2475	void *item;
2476	uint8_t freei;
2477
2478	MPASS(keg == slab->us_keg);
2479	mtx_assert(&keg->uk_lock, MA_OWNED);
2480
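	/* BIT_FFS() is 1-based, so convert to a 0-based item index. */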
2481	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2482	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2483	item = slab->us_data + (keg->uk_rsize * freei);
2484	slab->us_freecount--;
2485	keg->uk_free--;
2486
2487	/* Move this slab to the full list */
2488	if (slab->us_freecount == 0) {
2489		LIST_REMOVE(slab, us_link);
2490		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2491	}
2492
2493	return (item);
2494}
2495
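/*
 * zone_import() is the import callback used for keg-backed zones: fill the
 * caller's array with up to "max" items taken directly from the keg's slabs.
 */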
2496static int
2497zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2498{
2499	uma_slab_t slab;
2500	uma_keg_t keg;
2501	int i;
2502
2503	slab = NULL;
2504	keg = NULL;
2505	/* Try to keep the buckets totally full */
2506	for (i = 0; i < max; ) {
2507		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2508			break;
2509		keg = slab->us_keg;
2510		while (slab->us_freecount && i < max) {
2511			bucket[i++] = slab_alloc_item(keg, slab);
2512			if (keg->uk_free <= keg->uk_reserve)
2513				break;
2514		}
2515		/* Don't grab more than one slab at a time. */
2516		flags &= ~M_WAITOK;
2517		flags |= M_NOWAIT;
2518	}
2519	if (slab != NULL)
2520		KEG_UNLOCK(keg);
2521
2522	return (i);
2523}
2524
2525static uma_bucket_t
2526zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2527{
2528	uma_bucket_t bucket;
2529	int max;
2530
2531	/* Don't wait for buckets, preserve caller's NOVM setting. */
2532	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2533	if (bucket == NULL)
2534		return (NULL);
2535
2536	max = MIN(bucket->ub_entries, zone->uz_count);
2537	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2538	    max, flags);
2539
2540	/*
2541	 * Initialize the memory if necessary.
2542	 */
2543	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2544		int i;
2545
2546		for (i = 0; i < bucket->ub_cnt; i++)
2547			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2548			    flags) != 0)
2549				break;
2550		/*
2551		 * If we couldn't initialize the whole bucket, put the
2552		 * rest back onto the freelist.
2553		 */
2554		if (i != bucket->ub_cnt) {
2555			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2556			    bucket->ub_cnt - i);
2557#ifdef INVARIANTS
2558			bzero(&bucket->ub_bucket[i],
2559			    sizeof(void *) * (bucket->ub_cnt - i));
2560#endif
2561			bucket->ub_cnt = i;
2562		}
2563	}
2564
2565	if (bucket->ub_cnt == 0) {
2566		bucket_free(zone, bucket, udata);
2567		atomic_add_long(&zone->uz_fails, 1);
2568		return (NULL);
2569	}
2570
2571	return (bucket);
2572}
2573
2574/*
2575 * Allocates a single item from a zone.
2576 *
2577 * Arguments
2578 *	zone   The zone to alloc for.
2579 *	udata  The data to be passed to the constructor.
2580 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2581 *
2582 * Returns
2583 *	NULL if there is no memory and M_NOWAIT is set
2584 *	An item if successful
2585 */
2586
2587static void *
2588zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2589{
2590	void *item;
2591
2592	item = NULL;
2593
2594#ifdef UMA_DEBUG_ALLOC
2595	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2596#endif
2597	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2598		goto fail;
2599	atomic_add_long(&zone->uz_allocs, 1);
2600
2601	/*
2602	 * We have to call both the zone's init (not the keg's init)
2603	 * and the zone's ctor.  This is because the item is going from
2604	 * a keg slab directly to the user, and the user is expecting it
2605	 * to be both zone-init'd as well as zone-ctor'd.
2606	 */
2607	if (zone->uz_init != NULL) {
2608		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2609			zone_free_item(zone, item, udata, SKIP_FINI);
2610			goto fail;
2611		}
2612	}
2613	if (zone->uz_ctor != NULL) {
2614		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2615			zone_free_item(zone, item, udata, SKIP_DTOR);
2616			goto fail;
2617		}
2618	}
2619#ifdef INVARIANTS
2620	uma_dbg_alloc(zone, NULL, item);
2621#endif
2622	if (flags & M_ZERO)
2623		bzero(item, zone->uz_size);
2624
2625	return (item);
2626
2627fail:
2628	atomic_add_long(&zone->uz_fails, 1);
2629	return (NULL);
2630}
2631
2632/* See uma.h */
2633void
2634uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2635{
2636	uma_cache_t cache;
2637	uma_bucket_t bucket;
2638	int lockfail;
2639	int cpu;
2640
2641#ifdef UMA_DEBUG_ALLOC_1
2642	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2643#endif
2644	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2645	    zone->uz_name);
2646
2647	/* uma_zfree(..., NULL) does nothing, to match free(9). */
2648	if (item == NULL)
2649		return;
2650#ifdef DEBUG_MEMGUARD
2651	if (is_memguard_addr(item)) {
2652		if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2653			zone->uz_dtor(item, zone->uz_size, udata);
2654		if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2655			zone->uz_fini(item, zone->uz_size);
2656		memguard_free(item);
2657		return;
2658	}
2659#endif
2660#ifdef INVARIANTS
2661	if (zone->uz_flags & UMA_ZONE_MALLOC)
2662		uma_dbg_free(zone, udata, item);
2663	else
2664		uma_dbg_free(zone, NULL, item);
2665#endif
2666	if (zone->uz_dtor != NULL)
2667		zone->uz_dtor(item, zone->uz_size, udata);
2668
2669	/*
2670	 * The race here is acceptable.  If we miss it we'll just have to wait
2671	 * a little longer for the limits to be reset.
2672	 */
2673	if (zone->uz_flags & UMA_ZFLAG_FULL)
2674		goto zfree_item;
2675
2676	/*
2677	 * If possible, free to the per-CPU cache.  There are two
2678	 * requirements for safe access to the per-CPU cache: (1) the thread
2679	 * accessing the cache must not be preempted or yield during access,
2680	 * and (2) the thread must not migrate CPUs without switching which
2681	 * cache it accesses.  We rely on a critical section to prevent
2682	 * preemption and migration.  We release the critical section in
2683	 * order to acquire the zone mutex if we are unable to free to the
2684	 * current cache; when we re-acquire the critical section, we must
2685	 * detect and handle migration if it has occurred.
2686	 */
2687zfree_restart:
2688	critical_enter();
2689	cpu = curcpu;
2690	cache = &zone->uz_cpu[cpu];
2691
2692zfree_start:
2693	/*
2694	 * Try to free into the allocbucket first to give LIFO ordering
2695	 * for cache-hot data structures.  Spill over into the freebucket
2696	 * if necessary.  Alloc will swap them if one runs dry.
2697	 */
2698	bucket = cache->uc_allocbucket;
2699	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2700		bucket = cache->uc_freebucket;
2701	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2702		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2703		    ("uma_zfree: Freeing to non free bucket index."));
2704		bucket->ub_bucket[bucket->ub_cnt] = item;
2705		bucket->ub_cnt++;
2706		cache->uc_frees++;
2707		critical_exit();
2708		return;
2709	}
2710
2711	/*
2712	 * We must go back to the zone, which requires acquiring the zone lock,
2713	 * which in turn means we must release and re-acquire the critical
2714	 * section.  Since the critical section is released, we may be
2715	 * preempted or migrate.  As such, make sure not to maintain any
2716	 * thread-local state specific to the cache from prior to releasing
2717	 * the critical section.
2718	 */
2719	critical_exit();
2720	if (zone->uz_count == 0 || bucketdisable)
2721		goto zfree_item;
2722
2723	lockfail = 0;
2724	if (ZONE_TRYLOCK(zone) == 0) {
2725		/* Record contention to size the buckets. */
2726		ZONE_LOCK(zone);
2727		lockfail = 1;
2728	}
2729	critical_enter();
2730	cpu = curcpu;
2731	cache = &zone->uz_cpu[cpu];
2732
2733	/*
2734	 * Since we have locked the zone we may as well send back our stats.
2735	 */
2736	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2737	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2738	cache->uc_allocs = 0;
2739	cache->uc_frees = 0;
2740
2741	bucket = cache->uc_freebucket;
2742	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2743		ZONE_UNLOCK(zone);
2744		goto zfree_start;
2745	}
2746	cache->uc_freebucket = NULL;
2747
2748	/* Can we throw this on the zone full list? */
2749	if (bucket != NULL) {
2750#ifdef UMA_DEBUG_ALLOC
2751		printf("uma_zfree: Putting old bucket on the free list.\n");
2752#endif
2753		/* ub_cnt is pointing to the last free item */
2754		KASSERT(bucket->ub_cnt != 0,
2755		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2756		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2757	}
2758
2759	/* We are no longer associated with this CPU. */
2760	critical_exit();
2761
2762	/*
2763	 * We bump the uz count when the cache size is insufficient to
2764	 * handle the working set.
2765	 */
2766	if (lockfail && zone->uz_count < BUCKET_MAX)
2767		zone->uz_count++;
2768	ZONE_UNLOCK(zone);
2769
2770#ifdef UMA_DEBUG_ALLOC
2771	printf("uma_zfree: Allocating new free bucket.\n");
2772#endif
2773	bucket = bucket_alloc(zone, udata, M_NOWAIT);
2774	if (bucket) {
2775		critical_enter();
2776		cpu = curcpu;
2777		cache = &zone->uz_cpu[cpu];
2778		if (cache->uc_freebucket == NULL) {
2779			cache->uc_freebucket = bucket;
2780			goto zfree_start;
2781		}
2782		/*
2783		 * We lost the race, start over.  We have to drop our
2784		 * critical section to free the bucket.
2785		 */
2786		critical_exit();
2787		bucket_free(zone, bucket, udata);
2788		goto zfree_restart;
2789	}
2790
2791	/*
2792	 * If nothing else caught this, we'll just do an internal free.
2793	 */
2794zfree_item:
2795	zone_free_item(zone, item, udata, SKIP_DTOR);
2796
2797	return;
2798}
2799
2800static void
2801slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2802{
2803	uint8_t freei;
2804
2805	mtx_assert(&keg->uk_lock, MA_OWNED);
2806	MPASS(keg == slab->us_keg);
2807
2808	/* Do we need to remove from any lists? */
2809	if (slab->us_freecount+1 == keg->uk_ipers) {
2810		LIST_REMOVE(slab, us_link);
2811		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2812	} else if (slab->us_freecount == 0) {
2813		LIST_REMOVE(slab, us_link);
2814		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2815	}
2816
2817	/* Slab management. */
2818	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2819	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2820	slab->us_freecount++;
2821
2822	/* Keg statistics. */
2823	keg->uk_free++;
2824}
2825
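/*
 * zone_release() is the release callback used for keg-backed zones: return
 * each item to its slab and wake any threads sleeping on an exhausted keg.
 */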
2826static void
2827zone_release(uma_zone_t zone, void **bucket, int cnt)
2828{
2829	void *item;
2830	uma_slab_t slab;
2831	uma_keg_t keg;
2832	uint8_t *mem;
2833	int clearfull;
2834	int i;
2835
2836	clearfull = 0;
2837	keg = zone_first_keg(zone);
2838	KEG_LOCK(keg);
2839	for (i = 0; i < cnt; i++) {
2840		item = bucket[i];
2841		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2842			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2843			if (zone->uz_flags & UMA_ZONE_HASH) {
2844				slab = hash_sfind(&keg->uk_hash, mem);
2845			} else {
2846				mem += keg->uk_pgoff;
2847				slab = (uma_slab_t)mem;
2848			}
2849		} else {
2850			slab = vtoslab((vm_offset_t)item);
2851			if (slab->us_keg != keg) {
2852				KEG_UNLOCK(keg);
2853				keg = slab->us_keg;
2854				KEG_LOCK(keg);
2855			}
2856		}
2857		slab_free_item(keg, slab, item);
2858		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2859			if (keg->uk_pages < keg->uk_maxpages) {
2860				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2861				clearfull = 1;
2862			}
2863
2864			/*
2865			 * We can handle one more allocation. Since we're
2866			 * clearing ZFLAG_FULL, wake up all procs blocked
2867			 * on pages. This should be uncommon, so keeping this
2868			 * simple for now (rather than adding count of blocked
2869			 * threads etc).
2870			 */
2871			wakeup(keg);
2872		}
2873	}
2874	KEG_UNLOCK(keg);
2875	if (clearfull) {
2876		ZONE_LOCK(zone);
2877		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2878		wakeup(zone);
2879		ZONE_UNLOCK(zone);
2880	}
2881
2882}
2883
2884/*
2885 * Frees a single item to any zone.
2886 *
2887 * Arguments:
2888 *	zone   The zone to free to
2889 *	item   The item we're freeing
2890 *	udata  User supplied data for the dtor
2891 *	skip   Skip dtors and finis
2892 */
2893static void
2894zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2895{
2896
2897#ifdef INVARIANTS
2898	if (skip == SKIP_NONE) {
2899		if (zone->uz_flags & UMA_ZONE_MALLOC)
2900			uma_dbg_free(zone, udata, item);
2901		else
2902			uma_dbg_free(zone, NULL, item);
2903	}
2904#endif
2905	if (skip < SKIP_DTOR && zone->uz_dtor)
2906		zone->uz_dtor(item, zone->uz_size, udata);
2907
2908	if (skip < SKIP_FINI && zone->uz_fini)
2909		zone->uz_fini(item, zone->uz_size);
2910
2911	atomic_add_long(&zone->uz_frees, 1);
2912	zone->uz_release(zone->uz_arg, &item, 1);
2913}
2914
2915/* See uma.h */
2916int
2917uma_zone_set_max(uma_zone_t zone, int nitems)
2918{
2919	uma_keg_t keg;
2920
2921	keg = zone_first_keg(zone);
2922	if (keg == NULL)
2923		return (0);
2924	KEG_LOCK(keg);
2925	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2926	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2927		keg->uk_maxpages += keg->uk_ppera;
2928	nitems = keg->uk_maxpages * keg->uk_ipers;
2929	KEG_UNLOCK(keg);
2930
2931	return (nitems);
2932}
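/*
 * The limit is rounded up to whole slabs.  For a hypothetical keg with
 * uk_ipers == 50 and uk_ppera == 1, uma_zone_set_max(zone, 120) sets
 * uk_maxpages to 3, giving an effective limit of 150 items, which is also
 * the value returned to the caller.
 */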
2933
2934/* See uma.h */
2935int
2936uma_zone_get_max(uma_zone_t zone)
2937{
2938	int nitems;
2939	uma_keg_t keg;
2940
2941	keg = zone_first_keg(zone);
2942	if (keg == NULL)
2943		return (0);
2944	KEG_LOCK(keg);
2945	nitems = keg->uk_maxpages * keg->uk_ipers;
2946	KEG_UNLOCK(keg);
2947
2948	return (nitems);
2949}
2950
2951/* See uma.h */
2952void
2953uma_zone_set_warning(uma_zone_t zone, const char *warning)
2954{
2955
2956	ZONE_LOCK(zone);
2957	zone->uz_warning = warning;
2958	ZONE_UNLOCK(zone);
2959}
2960
2961/* See uma.h */
2962int
2963uma_zone_get_cur(uma_zone_t zone)
2964{
2965	int64_t nitems;
2966	u_int i;
2967
2968	ZONE_LOCK(zone);
2969	nitems = zone->uz_allocs - zone->uz_frees;
2970	CPU_FOREACH(i) {
2971		/*
2972		 * See the comment in sysctl_vm_zone_stats() regarding the
2973		 * safety of accessing the per-cpu caches. With the zone lock
2974		 * held, it is safe, but can potentially result in stale data.
2975		 */
2976		nitems += zone->uz_cpu[i].uc_allocs -
2977		    zone->uz_cpu[i].uc_frees;
2978	}
2979	ZONE_UNLOCK(zone);
2980
2981	return (nitems < 0 ? 0 : nitems);
2982}
2983
2984/* See uma.h */
2985void
2986uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2987{
2988	uma_keg_t keg;
2989
2990	keg = zone_first_keg(zone);
2991	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2992	KEG_LOCK(keg);
2993	KASSERT(keg->uk_pages == 0,
2994	    ("uma_zone_set_init on non-empty keg"));
2995	keg->uk_init = uminit;
2996	KEG_UNLOCK(keg);
2997}
2998
2999/* See uma.h */
3000void
3001uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3002{
3003	uma_keg_t keg;
3004
3005	keg = zone_first_keg(zone);
3006	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3007	KEG_LOCK(keg);
3008	KASSERT(keg->uk_pages == 0,
3009	    ("uma_zone_set_fini on non-empty keg"));
3010	keg->uk_fini = fini;
3011	KEG_UNLOCK(keg);
3012}
3013
3014/* See uma.h */
3015void
3016uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3017{
3018
3019	ZONE_LOCK(zone);
3020	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3021	    ("uma_zone_set_zinit on non-empty keg"));
3022	zone->uz_init = zinit;
3023	ZONE_UNLOCK(zone);
3024}
3025
3026/* See uma.h */
3027void
3028uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3029{
3030
3031	ZONE_LOCK(zone);
3032	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3033	    ("uma_zone_set_zfini on non-empty keg"));
3034	zone->uz_fini = zfini;
3035	ZONE_UNLOCK(zone);
3036}
3037
3038/* See uma.h */
3039/* XXX uk_freef is not actually used with the zone locked */
3040void
3041uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3042{
3043	uma_keg_t keg;
3044
3045	keg = zone_first_keg(zone);
3046	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3047	KEG_LOCK(keg);
3048	keg->uk_freef = freef;
3049	KEG_UNLOCK(keg);
3050}
3051
3052/* See uma.h */
3053/* XXX uk_allocf is not actually used with the zone locked */
3054void
3055uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3056{
3057	uma_keg_t keg;
3058
3059	keg = zone_first_keg(zone);
3060	KEG_LOCK(keg);
3061	keg->uk_allocf = allocf;
3062	KEG_UNLOCK(keg);
3063}
3064
3065/* See uma.h */
3066void
3067uma_zone_reserve(uma_zone_t zone, int items)
3068{
3069	uma_keg_t keg;
3070
3071	keg = zone_first_keg(zone);
3072	if (keg == NULL)
3073		return;
3074	KEG_LOCK(keg);
3075	keg->uk_reserve = items;
3076	KEG_UNLOCK(keg);
3077
3078	return;
3079}
3080
3081/* See uma.h */
3082int
3083uma_zone_reserve_kva(uma_zone_t zone, int count)
3084{
3085	uma_keg_t keg;
3086	vm_offset_t kva;
3087	int pages;
3088
3089	keg = zone_first_keg(zone);
3090	if (keg == NULL)
3091		return (0);
3092	pages = count / keg->uk_ipers;
3093
3094	if (pages * keg->uk_ipers < count)
3095		pages++;
3096
3097#ifdef UMA_MD_SMALL_ALLOC
3098	if (keg->uk_ppera > 1) {
3099#else
3100	if (1) {
3101#endif
3102		kva = kva_alloc(pages * UMA_SLAB_SIZE);
3103		if (kva == 0)
3104			return (0);
3105	} else
3106		kva = 0;
3107	KEG_LOCK(keg);
3108	keg->uk_kva = kva;
3109	keg->uk_offset = 0;
3110	keg->uk_maxpages = pages;
3111#ifdef UMA_MD_SMALL_ALLOC
3112	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3113#else
3114	keg->uk_allocf = noobj_alloc;
3115#endif
3116	keg->uk_flags |= UMA_ZONE_NOFREE;
3117	KEG_UNLOCK(keg);
3118
3119	return (1);
3120}
3121
3122/* See uma.h */
3123void
3124uma_prealloc(uma_zone_t zone, int items)
3125{
3126	int slabs;
3127	uma_slab_t slab;
3128	uma_keg_t keg;
3129
3130	keg = zone_first_keg(zone);
3131	if (keg == NULL)
3132		return;
3133	KEG_LOCK(keg);
3134	slabs = items / keg->uk_ipers;
3135	if (slabs * keg->uk_ipers < items)
3136		slabs++;
3137	while (slabs > 0) {
3138		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3139		if (slab == NULL)
3140			break;
3141		MPASS(slab->us_keg == keg);
3142		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3143		slabs--;
3144	}
3145	KEG_UNLOCK(keg);
3146}
3147
3148/* See uma.h */
3149uint32_t *
3150uma_find_refcnt(uma_zone_t zone, void *item)
3151{
3152	uma_slabrefcnt_t slabref;
3153	uma_slab_t slab;
3154	uma_keg_t keg;
3155	uint32_t *refcnt;
3156	int idx;
3157
3158	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
3159	slabref = (uma_slabrefcnt_t)slab;
3160	keg = slab->us_keg;
3161	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
3162	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3163	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3164	refcnt = &slabref->us_refcnt[idx];
3165	return (refcnt);
3166}
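/*
 * Example (hypothetical consumer): for a zone created with UMA_ZONE_REFCNT,
 * the per-item counter can be located and initialized after allocation:
 *
 *	p = uma_zalloc(refzone, M_WAITOK);
 *	refcnt = uma_find_refcnt(refzone, p);
 *	*refcnt = 1;
 */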
3167
3168/* See uma.h */
3169void
3170uma_reclaim(void)
3171{
3172#ifdef UMA_DEBUG
3173	printf("UMA: vm asked us to release pages!\n");
3174#endif
3175	bucket_enable();
3176	zone_foreach(zone_drain);
3177	if (vm_page_count_min()) {
3178		cache_drain_safe(NULL);
3179		zone_foreach(zone_drain);
3180	}
3181	/*
3182	 * Some slabs may have been freed while draining the zones above, but
3183	 * the slab zones themselves were visited early in that pass; drain them
3184	 * again so newly empty pages can be freed.  Do the same for buckets.
3185	 */
3186	zone_drain(slabzone);
3187	zone_drain(slabrefzone);
3188	bucket_zone_drain();
3189}
3190
3191/* See uma.h */
3192int
3193uma_zone_exhausted(uma_zone_t zone)
3194{
3195	int full;
3196
3197	ZONE_LOCK(zone);
3198	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3199	ZONE_UNLOCK(zone);
3200	return (full);
3201}
3202
3203int
3204uma_zone_exhausted_nolock(uma_zone_t zone)
3205{
3206	return (zone->uz_flags & UMA_ZFLAG_FULL);
3207}
3208
3209void *
3210uma_large_malloc(int size, int wait)
3211{
3212	void *mem;
3213	uma_slab_t slab;
3214	uint8_t flags;
3215
3216	slab = zone_alloc_item(slabzone, NULL, wait);
3217	if (slab == NULL)
3218		return (NULL);
3219	mem = page_alloc(NULL, size, &flags, wait);
3220	if (mem) {
3221		vsetslab((vm_offset_t)mem, slab);
3222		slab->us_data = mem;
3223		slab->us_flags = flags | UMA_SLAB_MALLOC;
3224		slab->us_size = size;
3225	} else {
3226		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3227	}
3228
3229	return (mem);
3230}
3231
3232void
3233uma_large_free(uma_slab_t slab)
3234{
3235
3236	page_free(slab->us_data, slab->us_size, slab->us_flags);
3237	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3238}
3239
3240void
3241uma_print_stats(void)
3242{
3243	zone_foreach(uma_print_zone);
3244}
3245
3246static void
3247slab_print(uma_slab_t slab)
3248{
3249	printf("slab: keg %p, data %p, freecount %d\n",
3250		slab->us_keg, slab->us_data, slab->us_freecount);
3251}
3252
3253static void
3254cache_print(uma_cache_t cache)
3255{
3256	printf("alloc: %p(%d), free: %p(%d)\n",
3257		cache->uc_allocbucket,
3258		cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
3259		cache->uc_freebucket,
3260		cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
3261}
3262
3263static void
3264uma_print_keg(uma_keg_t keg)
3265{
3266	uma_slab_t slab;
3267
3268	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3269	    "out %d free %d limit %d\n",
3270	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3271	    keg->uk_ipers, keg->uk_ppera,
3272	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3273	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3274	printf("Part slabs:\n");
3275	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3276		slab_print(slab);
3277	printf("Free slabs:\n");
3278	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3279		slab_print(slab);
3280	printf("Full slabs:\n");
3281	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3282		slab_print(slab);
3283}
3284
3285void
3286uma_print_zone(uma_zone_t zone)
3287{
3288	uma_cache_t cache;
3289	uma_klink_t kl;
3290	int i;
3291
3292	printf("zone: %s(%p) size %d flags %#x\n",
3293	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3294	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3295		uma_print_keg(kl->kl_keg);
3296	CPU_FOREACH(i) {
3297		cache = &zone->uz_cpu[i];
3298		printf("CPU %d Cache:\n", i);
3299		cache_print(cache);
3300	}
3301}
3302
3303#ifdef DDB
3304/*
3305 * Generate statistics across both the zone and its per-CPU caches.  Return
3306 * desired statistics if the pointer is non-NULL for that statistic.
3307 *
3308 * Note: does not update the zone statistics, as it can't safely clear the
3309 * per-CPU cache statistic.
3310 *
3311 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3312 * safe from off-CPU; we should modify the caches to track this information
3313 * directly so that we don't have to.
3314 */
3315static void
3316uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3317    uint64_t *freesp, uint64_t *sleepsp)
3318{
3319	uma_cache_t cache;
3320	uint64_t allocs, frees, sleeps;
3321	int cachefree, cpu;
3322
3323	allocs = frees = sleeps = 0;
3324	cachefree = 0;
3325	CPU_FOREACH(cpu) {
3326		cache = &z->uz_cpu[cpu];
3327		if (cache->uc_allocbucket != NULL)
3328			cachefree += cache->uc_allocbucket->ub_cnt;
3329		if (cache->uc_freebucket != NULL)
3330			cachefree += cache->uc_freebucket->ub_cnt;
3331		allocs += cache->uc_allocs;
3332		frees += cache->uc_frees;
3333	}
3334	allocs += z->uz_allocs;
3335	frees += z->uz_frees;
3336	sleeps += z->uz_sleeps;
3337	if (cachefreep != NULL)
3338		*cachefreep = cachefree;
3339	if (allocsp != NULL)
3340		*allocsp = allocs;
3341	if (freesp != NULL)
3342		*freesp = frees;
3343	if (sleepsp != NULL)
3344		*sleepsp = sleeps;
3345}
3346#endif /* DDB */
3347
3348static int
3349sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3350{
3351	uma_keg_t kz;
3352	uma_zone_t z;
3353	int count;
3354
3355	count = 0;
3356	mtx_lock(&uma_mtx);
3357	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3358		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3359			count++;
3360	}
3361	mtx_unlock(&uma_mtx);
3362	return (sysctl_handle_int(oidp, &count, 0, req));
3363}
3364
3365static int
3366sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3367{
3368	struct uma_stream_header ush;
3369	struct uma_type_header uth;
3370	struct uma_percpu_stat ups;
3371	uma_bucket_t bucket;
3372	struct sbuf sbuf;
3373	uma_cache_t cache;
3374	uma_klink_t kl;
3375	uma_keg_t kz;
3376	uma_zone_t z;
3377	uma_keg_t k;
3378	int count, error, i;
3379
3380	error = sysctl_wire_old_buffer(req, 0);
3381	if (error != 0)
3382		return (error);
3383	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3384
3385	count = 0;
3386	mtx_lock(&uma_mtx);
3387	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3388		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3389			count++;
3390	}
3391
3392	/*
3393	 * Insert stream header.
3394	 */
3395	bzero(&ush, sizeof(ush));
3396	ush.ush_version = UMA_STREAM_VERSION;
3397	ush.ush_maxcpus = (mp_maxid + 1);
3398	ush.ush_count = count;
3399	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3400
3401	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3402		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3403			bzero(&uth, sizeof(uth));
3404			ZONE_LOCK(z);
3405			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3406			uth.uth_align = kz->uk_align;
3407			uth.uth_size = kz->uk_size;
3408			uth.uth_rsize = kz->uk_rsize;
3409			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3410				k = kl->kl_keg;
3411				uth.uth_maxpages += k->uk_maxpages;
3412				uth.uth_pages += k->uk_pages;
3413				uth.uth_keg_free += k->uk_free;
3414				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3415				    * k->uk_ipers;
3416			}
3417
3418			/*
3419			 * A zone is secondary if it is not the first entry
3420			 * on the keg's zone list.
3421			 */
3422			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3423			    (LIST_FIRST(&kz->uk_zones) != z))
3424				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3425
3426			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3427				uth.uth_zone_free += bucket->ub_cnt;
3428			uth.uth_allocs = z->uz_allocs;
3429			uth.uth_frees = z->uz_frees;
3430			uth.uth_fails = z->uz_fails;
3431			uth.uth_sleeps = z->uz_sleeps;
3432			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3433			/*
3434			 * While it is not normally safe to access the cache
3435			 * bucket pointers while not on the CPU that owns the
3436			 * cache, we only allow the pointers to be exchanged
3437			 * without the zone lock held, not invalidated, so
3438			 * accept the possible race associated with bucket
3439			 * exchange during monitoring.
3440			 */
3441			for (i = 0; i < (mp_maxid + 1); i++) {
3442				bzero(&ups, sizeof(ups));
3443				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3444					goto skip;
3445				if (CPU_ABSENT(i))
3446					goto skip;
3447				cache = &z->uz_cpu[i];
3448				if (cache->uc_allocbucket != NULL)
3449					ups.ups_cache_free +=
3450					    cache->uc_allocbucket->ub_cnt;
3451				if (cache->uc_freebucket != NULL)
3452					ups.ups_cache_free +=
3453					    cache->uc_freebucket->ub_cnt;
3454				ups.ups_allocs = cache->uc_allocs;
3455				ups.ups_frees = cache->uc_frees;
3456skip:
3457				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3458			}
3459			ZONE_UNLOCK(z);
3460		}
3461	}
3462	mtx_unlock(&uma_mtx);
3463	error = sbuf_finish(&sbuf);
3464	sbuf_delete(&sbuf);
3465	return (error);
3466}
3467
3468#ifdef DDB
3469DB_SHOW_COMMAND(uma, db_show_uma)
3470{
3471	uint64_t allocs, frees, sleeps;
3472	uma_bucket_t bucket;
3473	uma_keg_t kz;
3474	uma_zone_t z;
3475	int cachefree;
3476
3477	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
3478	    "Free", "Requests", "Sleeps", "Bucket");
3479	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3480		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3481			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3482				allocs = z->uz_allocs;
3483				frees = z->uz_frees;
3484				sleeps = z->uz_sleeps;
3485				cachefree = 0;
3486			} else
3487				uma_zone_sumstat(z, &cachefree, &allocs,
3488				    &frees, &sleeps);
3489			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3490			    (LIST_FIRST(&kz->uk_zones) != z)))
3491				cachefree += kz->uk_free;
3492			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3493				cachefree += bucket->ub_cnt;
3494			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
3495			    z->uz_name, (uintmax_t)kz->uk_size,
3496			    (intmax_t)(allocs - frees), cachefree,
3497			    (uintmax_t)allocs, sleeps, z->uz_count);
3498			if (db_pager_quit)
3499				return;
3500		}
3501	}
3502}
3503
3504DB_SHOW_COMMAND(umacache, db_show_umacache)
3505{
3506	uint64_t allocs, frees;
3507	uma_bucket_t bucket;
3508	uma_zone_t z;
3509	int cachefree;
3510
3511	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3512	    "Requests", "Bucket");
3513	LIST_FOREACH(z, &uma_cachezones, uz_link) {
3514		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3515		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3516			cachefree += bucket->ub_cnt;
3517		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
3518		    z->uz_name, (uintmax_t)z->uz_size,
3519		    (intmax_t)(allocs - frees), cachefree,
3520		    (uintmax_t)allocs, z->uz_count);
3521		if (db_pager_quit)
3522			return;
3523	}
3524}
3525#endif
3526