uma_core.c revision 260300
/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/vm/uma_core.c 260300 2014-01-04 23:35:34Z mav $");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;
static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* This mutex protects the keg list */
static struct mtx_padalign uma_mtx;

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(uma_boot_pages);

/* This mutex protects the boot time pages list */
static struct mtx_padalign uma_boot_pages_mtx;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
static const u_int uma_max_ipers = SLAB_SETSIZE;

/*
 * Only mbuf clusters use ref zones.  Just provide enough references
 * to support the one user.  New code should not use the ref facility.
 */
static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries so that the bucket allocation
 * packs into a power-of-two size, for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(128)

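/*
 * Editor's note (not in the original source): BUCKET_SIZE(n) reserves the
 * space of n pointers and carves the struct uma_bucket header out of it,
 * i.e. it yields the number of void * item slots that remain in an
 * n-pointer-sized allocation once the header is subtracted.  Each entry in
 * the table below therefore occupies a power-of-two-sized footprint.
 */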
struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, NULL, 0}
};

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
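/*
 * Editor's note (assumption, zone_free_item() is not shown in this
 * excerpt): the skip stages appear to be ordered, so SKIP_DTOR skips the
 * item destructor while SKIP_FINI additionally skips the fini step, for
 * callers where a stage has already run or must not run.
 */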

/* Prototypes.. */

static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
static void page_free(void *, int, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
TUNABLE_INT("vm.zone_warnings", &zone_warnings);
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
    "Warn when UMA zones become full");

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;
	int i;

	for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

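/*
 * Editor's note: select how many entries a bucket should hold for a zone
 * with items of the given size.  Items larger than every ubz_maxsize get
 * a count scaled down so that a full bucket still caches roughly
 * ubz_maxsize * ubz_entries bytes; otherwise we pick the largest bucket
 * whose per-item size limit still covers the allocation size.
 */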
static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * keg hash tables when needed.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is avoid collisions entirely.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize)  {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the table was expanded and 0 otherwise.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash table to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose table we're freeing; uh_hashsize
 *	      determines which backing store it came from.
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */
static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}

	/*
	 * Shrink further bucket sizes.  The price of a single zone lock
	 * collision is probably lower than the price of a global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

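/*
 * Editor's note: keg_free_slab() tears down a slab that may be only
 * partially initialized: it runs the keg's fini on the first 'start'
 * items, frees an OFFPAGE slab header back to its slab zone, and returns
 * the pages to the keg's backend via uk_freef.
 */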
static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
#ifdef UMA_DEBUG
	printf("%s: Returning %d bytes.\n", keg->uk_name,
	    PAGE_SIZE * keg->uk_ppera);
#endif
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab;
	uma_slab_t n;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time.
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
#endif
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to. */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		mtx_unlock(&uma_mtx);
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
		mtx_lock(&uma_mtx);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_mtx would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_slabrefcnt_t slabref;
	uma_alloc allocf;
	uma_slab_t slab;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

#ifdef UMA_DEBUG
	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
#endif
	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);
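	/*
	 * Editor's note: the keg lock is dropped above because the backend
	 * allocator (uk_allocf) may sleep for M_WAITOK requests, and
	 * sleeping while holding a mutex is not permitted.
	 */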

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		for (i = 0; i < keg->uk_ipers; i++)
			slabref->us_refcnt[i] = 0;
	}

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	uma_slab_t tmps;
	int pages, check_pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	check_pages = pages - 1;
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);

	/* First check if we have enough room. */
	tmps = LIST_FIRST(&uma_boot_pages);
	while (tmps != NULL && check_pages-- > 0)
		tmps = LIST_NEXT(tmps, us_link);
	if (tmps != NULL) {
		/*
		 * It's ok to lose tmps references.  The last one will
		 * have tmps->us_data pointing to the start address of
		 * "pages" contiguous pages of memory.
		 */
		while (pages-- > 0) {
			tmps = LIST_FIRST(&uma_boot_pages);
			LIST_REMOVE(tmps, us_link);
		}
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = tmps->us_flags;
		return (tmps->us_data);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_arena, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages not belonging to a VM object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{
	TAILQ_HEAD(, vm_page) alloctail;
	u_long npages;
	vm_offset_t retkva, zkva;
	vm_page_t p, p_next;
	uma_keg_t keg;

	TAILQ_INIT(&alloctail);
	keg = zone_first_keg(zone);

	npages = howmany(bytes, PAGE_SIZE);
	while (npages > 0) {
		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
		if (p != NULL) {
			/*
			 * Since the page does not belong to an object, its
			 * listq is unused.
			 */
			TAILQ_INSERT_TAIL(&alloctail, p, listq);
			npages--;
			continue;
		}
		if (wait & M_WAITOK) {
			VM_WAIT;
			continue;
		}

		/*
		 * Page allocation failed, free intermediate pages and
		 * exit.
		 */
		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
		return (NULL);
	}
	*flags = UMA_SLAB_PRIV;
	zkva = keg->uk_kva +
	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
	retkva = zkva;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, int size, uint8_t flags)
{
	struct vmem *vmem;

	if (flags & UMA_SLAB_KMEM)
		vmem = kmem_arena;
	else if (flags & UMA_SLAB_KERNEL)
		vmem = kernel_arena;
	else
		panic("UMA: page_free used with invalid flags %d", flags);

	kmem_free(vmem, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

/*
 * Finish creating a small uma keg.  This calculates ipers, and the keg size.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_small_init(uma_keg_t keg)
{
	u_int rsize;
	u_int memused;
	u_int wastedspace;
	u_int shsize;

	if (keg->uk_flags & UMA_ZONE_PCPU) {
		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;

		keg->uk_slabsize = sizeof(struct pcpu);
		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
		    PAGE_SIZE);
	} else {
		keg->uk_slabsize = UMA_SLAB_SIZE;
		keg->uk_ppera = 1;
	}

	/*
	 * Calculate the size of each allocation (rsize) according to
	 * alignment.  If the requested size is smaller than we have
	 * allocation bits for, we round it up.
	 */
	rsize = keg->uk_size;
	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
		rsize = keg->uk_slabsize / SLAB_SETSIZE;
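	/*
	 * Editor's note: uk_align is an alignment mask.  The round-up below
	 * clears the low bits and adds one full alignment unit, e.g. with
	 * uk_align == 7 (8-byte alignment) an rsize of 20 becomes
	 * (20 & ~7) + 8 == 24.
	 */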
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
	keg->uk_rsize = rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
	    keg->uk_rsize < sizeof(struct pcpu),
	    ("%s: size %u too large", __func__, keg->uk_rsize));

	if (keg->uk_flags & UMA_ZONE_REFCNT)
		rsize += sizeof(uint32_t);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		shsize = 0;
	else
		shsize = sizeof(struct uma_slab);

	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));

	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = keg->uk_slabsize - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	/*
	 * See if using an OFFPAGE slab will limit our waste.  Only do
	 * this if it permits more items per-slab.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 * Historically this was not done because the VM could not
	 * efficiently handle contiguous allocations.
	 */
	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
#ifdef UMA_DEBUG
		printf("UMA decided we need offpage slab headers for "
		    "keg: %s, calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", keg->uk_name, wastedspace,
		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
#endif
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_large_init(uma_keg_t keg)
{

	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));

	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
	keg->uk_ipers = 1;
	keg->uk_rsize = keg->uk_size;

	/* We can't do OFFPAGE if we're internal, bail out here. */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
		return;

	keg->uk_flags |= UMA_ZONE_OFFPAGE;
	if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

static void
keg_cachespread_init(uma_keg_t keg)
{
	int alignsize;
	int trailer;
	int pages;
	int rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));

	alignsize = keg->uk_align + 1;
	rsize = keg->uk_size;
	/*
	 * We want one item to start on every align boundary in a page.  To
	 * do this we will span pages.  We will also extend the item by the
	 * size of align if it is an even multiple of align.  Otherwise, it
	 * would fall on the same boundary every time.
	 */
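	/*
	 * Editor's note, a worked example: with 64-byte alignment
	 * (alignsize == 64) and rsize == 256, rsize is already aligned but
	 * is an even multiple of align, so it is extended to 320.  With
	 * 4 KiB pages this gives pages == (320 * 64) / 4096 == 5, and
	 * successive items then begin on different 64-byte boundaries
	 * within a page.
	 */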
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + alignsize;
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - keg->uk_size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
	keg->uk_rsize = rsize;
	keg->uk_ppera = pages;
	keg->uk_slabsize = UMA_SLAB_SIZE;
	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
	KASSERT(keg->uk_ipers <= uma_max_ipers,
	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
	    keg->uk_ipers));
}

/*
 * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_kctor_args *arg = udata;
	uma_keg_t keg = mem;
	uma_zone_t zone;

	bzero(keg, size);
	keg->uk_size = arg->size;
	keg->uk_init = arg->uminit;
	keg->uk_fini = arg->fini;
	keg->uk_align = arg->align;
	keg->uk_free = 0;
	keg->uk_reserve = 0;
	keg->uk_pages = 0;
	keg->uk_flags = arg->flags;
	keg->uk_allocf = page_alloc;
	keg->uk_freef = page_free;
	keg->uk_slabzone = NULL;

	/*
	 * The master zone is passed to us at keg-creation time.
	 */
	zone = arg->zone;
	keg->uk_name = zone->uz_name;

	if (arg->flags & UMA_ZONE_VM)
		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;

	if (arg->flags & UMA_ZONE_ZINIT)
		keg->uk_init = zero_init;

	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
		keg->uk_flags |= UMA_ZONE_VTOSLAB;

	if (arg->flags & UMA_ZONE_PCPU)
#ifdef SMP
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
#else
		keg->uk_flags &= ~UMA_ZONE_PCPU;
#endif

	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
		keg_cachespread_init(keg);
	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
		if (keg->uk_size >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
		    sizeof(uint32_t)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	} else {
		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	}

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		if (keg->uk_flags & UMA_ZONE_REFCNT) {
			if (keg->uk_ipers > uma_max_ipers_ref)
				panic("Too many ref items per zone: %d > %d\n",
				    keg->uk_ipers, uma_max_ipers_ref);
			keg->uk_slabzone = slabrefzone;
		} else
			keg->uk_slabzone = slabzone;
	}

	/*
	 * If we haven't booted yet we need allocations to go through the
	 * startup cache until the vm is ready.
	 */
	if (keg->uk_ppera == 1) {
#ifdef UMA_MD_SMALL_ALLOC
		keg->uk_allocf = uma_small_alloc;
		keg->uk_freef = uma_small_free;

		if (booted < UMA_STARTUP)
			keg->uk_allocf = startup_alloc;
#else
		if (booted < UMA_STARTUP2)
			keg->uk_allocf = startup_alloc;
#endif
	} else if (booted < UMA_STARTUP2 &&
	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
		keg->uk_allocf = startup_alloc;

	/*
	 * Initialize keg's lock
	 */
	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
		u_int totsize;

		/* Size of the slab struct and free list */
		totsize = sizeof(struct uma_slab);

		/* Size of the reference counts. */
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);

		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
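		/*
		 * Editor's note: uk_pgoff right-justifies the slab header,
		 * so the header occupies the final totsize bytes of each
		 * allocation and items are laid out from offset 0 up to
		 * uk_pgoff (see its use in keg_alloc_slab()).
		 */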

		/*
		 * The only way the following is possible is if with our
		 * UMA_ALIGN_PTR adjustments we are now bigger than
		 * UMA_SLAB_SIZE.  I haven't checked whether this is
		 * mathematically possible for all cases, so we make
		 * sure here anyway.
		 */
		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);
		if (totsize > PAGE_SIZE * keg->uk_ppera) {
			printf("zone %s ipers %d rsize %d size %d\n",
			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
			    keg->uk_size);
			panic("UMA slab won't fit.");
		}
	}

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_alloc(&keg->uk_hash);

#ifdef UMA_DEBUG
	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
	    keg->uk_ipers, keg->uk_ppera,
	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
#endif

	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);

	mtx_lock(&uma_mtx);
	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
	mtx_unlock(&uma_mtx);
	return (0);
}

/*
 * Zone header ctor.  This initializes all fields, locks, etc.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 */
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	uma_zone_t z;
	uma_keg_t keg;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_slab = zone_fetch_slab;
	zone->uz_init = NULL;
	zone->uz_fini = NULL;
	zone->uz_allocs = 0;
	zone->uz_frees = 0;
	zone->uz_fails = 0;
	zone->uz_sleeps = 0;
	zone->uz_count = 0;
	zone->uz_count_min = 0;
	zone->uz_flags = 0;
	zone->uz_warning = NULL;
	timevalclear(&zone->uz_ratecheck);
	keg = arg->keg;

	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * This is a pure cache zone, no kegs.
	 */
	if (arg->import) {
		if (arg->flags & UMA_ZONE_VM)
			arg->flags |= UMA_ZFLAG_CACHEONLY;
		zone->uz_flags = arg->flags;
		zone->uz_size = arg->size;
		zone->uz_import = arg->import;
		zone->uz_release = arg->release;
		zone->uz_arg = arg->arg;
		zone->uz_lockptr = &zone->uz_lock;
		goto out;
	}

	/*
	 * Use the regular zone/keg/slab allocator.
	 */
	zone->uz_import = (uma_import)zone_import;
	zone->uz_release = (uma_release)zone_release;
	zone->uz_arg = zone;

	if (arg->flags & UMA_ZONE_SECONDARY) {
		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
		zone->uz_init = arg->uminit;
		zone->uz_fini = arg->fini;
		zone->uz_lockptr = &keg->uk_lock;
		zone->uz_flags |= UMA_ZONE_SECONDARY;
		mtx_lock(&uma_mtx);
		ZONE_LOCK(zone);
		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
			if (LIST_NEXT(z, uz_link) == NULL) {
				LIST_INSERT_AFTER(z, zone, uz_link);
				break;
			}
		}
		ZONE_UNLOCK(zone);
		mtx_unlock(&uma_mtx);
	} else if (keg == NULL) {
		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
		    arg->align, arg->flags)) == NULL)
			return (ENOMEM);
	} else {
		struct uma_kctor_args karg;
		int error;

		/* We should only be here from uma_startup() */
		karg.size = arg->size;
		karg.uminit = arg->uminit;
		karg.fini = arg->fini;
		karg.align = arg->align;
		karg.flags = arg->flags;
		karg.zone = zone;
		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Link in the first keg.
	 */
	zone->uz_klink.kl_keg = keg;
	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
	zone->uz_lockptr = &keg->uk_lock;
	zone->uz_size = keg->uk_size;
	zone->uz_flags |= (keg->uk_flags &
	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
		return (0);
	}

out:
	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
		zone->uz_count = bucket_select(zone->uz_size);
	else
		zone->uz_count = BUCKET_MAX;
	zone->uz_count_min = zone->uz_count;

	return (0);
}

/*
 * Keg header dtor.  This frees all data, destroys locks, frees the hash
 * table and removes the keg from the global list.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	KEG_LOCK(keg);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg (%s) was not empty (%d items). "
		    "Lost %d pages of memory.\n",
		    keg->uk_name ? keg->uk_name : "",
		    keg->uk_free, keg->uk_pages);
	}
	KEG_UNLOCK(keg);

	hash_free(&keg->uk_hash);

	KEG_LOCK_FINI(keg);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_klink_t klink;
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone_first_keg(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	mtx_lock(&uma_mtx);
	LIST_REMOVE(zone, uz_link);
	mtx_unlock(&uma_mtx);
	/*
	 * XXX there are some races here where the zone can be drained but
	 * the zone lock released and then refilled before we remove it...
	 * we don't care for now.
	 */
	zone_drain_wait(zone, M_WAITOK);
	/*
	 * Unlink all of our kegs.
	 */
	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
		klink->kl_keg = NULL;
		LIST_REMOVE(klink, kl_link);
		if (klink == &zone->uz_klink)
			continue;
		free(klink, M_TEMP);
	}
	/*
	 * We only destroy kegs from non secondary zones.
	 */
	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
		mtx_lock(&uma_mtx);
		LIST_REMOVE(keg, uk_link);
		mtx_unlock(&uma_mtx);
		zone_free_item(kegs, keg, NULL, SKIP_NONE);
	}
	ZONE_LOCK_FINI(zone);
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *		as an argument.
 *
 * Returns:
 *	Nothing
 */
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
	uma_keg_t keg;
	uma_zone_t zone;

	mtx_lock(&uma_mtx);
	LIST_FOREACH(keg, &uma_kegs, uk_link) {
		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
			zfunc(zone);
	}
	mtx_unlock(&uma_mtx);
}

/* Public functions */
/* See uma.h */
void
uma_startup(void *bootmem, int boot_pages)
{
	struct uma_zctor_args args;
	uma_slab_t slab;
	u_int slabsize;
	int i;

#ifdef UMA_DEBUG
	printf("Creating uma keg headers zone and keg.\n");
#endif
	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);

	/* "manually" create the initial zone */
	memset(&args, 0, sizeof(args));
	args.name = "UMA Kegs";
	args.size = sizeof(struct uma_keg);
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = &masterkeg;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Filling boot free list.\n");
#endif
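	/*
	 * Editor's note: each boot page below doubles as its own slab
	 * header; us_data points back at the page itself so that
	 * startup_alloc() can later hand out the raw pages.
	 */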
	for (i = 0; i < boot_pages; i++) {
		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
		slab->us_data = (uint8_t *)slab;
		slab->us_flags = UMA_SLAB_BOOT;
		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
	}
	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);

#ifdef UMA_DEBUG
	printf("Creating uma zone headers zone and keg.\n");
#endif
	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1));
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Creating slab and hash zones.\n");
#endif

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
				sizeof(struct uma_slab),
				NULL, NULL, NULL, NULL,
				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	/*
	 * We also create a zone for the bigger slabs with reference
	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
	 */
	slabsize = sizeof(struct uma_slab_refcnt);
	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
	slabrefzone = uma_zcreate("UMA RCntSlabs",
				  slabsize,
				  NULL, NULL, NULL, NULL,
				  UMA_ALIGN_PTR,
				  UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

	booted = UMA_STARTUP;

#ifdef UMA_DEBUG
	printf("UMA startup complete.\n");
#endif
}

/* see uma.h */
void
uma_startup2(void)
{
	booted = UMA_STARTUP2;
	bucket_enable();
#ifdef UMA_DEBUG
	printf("UMA startup2 complete.\n");
#endif
}

/*
 * Initialize our callout handle
 */
static void
uma_startup3(void)
{
#ifdef UMA_DEBUG
	printf("Starting callout.\n");
#endif
	callout_init(&uma_callout, CALLOUT_MPSAFE);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifdef UMA_DEBUG
	printf("UMA startup3 complete.\n");
#endif
}

static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
		int align, uint32_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
	args.flags = flags;
	args.zone = zone;
	return (zone_alloc_item(kegs, &args, M_WAITOK));
}

/* See uma.h */
void
uma_set_align(int align)
{

	if (align != UMA_ALIGN_CACHE)
		uma_align_cache = align;
}

/* See uma.h */
uma_zone_t
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
		uma_init uminit, uma_fini fini, int align, uint32_t flags)
{
	struct uma_zctor_args args;

	/* This stuff is essential for the zone ctor */
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.keg = NULL;

	return (zone_alloc_item(zones, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
		    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;
	uma_keg_t keg;

	keg = zone_first_keg(master);
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = keg->uk_align;
	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = keg;

	/* XXX Attaches only one keg of potentially many. */
	return (zone_alloc_item(zones, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
		    uma_init zinit, uma_fini zfini, uma_import zimport,
		    uma_release zrelease, void *arg, int flags)
{
	struct uma_zctor_args args;

	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.import = zimport;
	args.release = zrelease;
	args.arg = arg;
	args.align = 0;
	args.flags = flags;

	return (zone_alloc_item(zones, &args, M_WAITOK));
}

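/*
 * Editor's note: the pair is locked in a consistent (address) order so
 * that concurrent cross-zone operations cannot deadlock against each
 * other; MTX_DUPOK tells WITNESS that acquiring a second lock of the
 * same class is intentional here.
 */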
1890static void
1891zone_lock_pair(uma_zone_t a, uma_zone_t b)
1892{
1893	if (a < b) {
1894		ZONE_LOCK(a);
1895		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1896	} else {
1897		ZONE_LOCK(b);
1898		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1899	}
1900}
1901
1902static void
1903zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1904{
1905
1906	ZONE_UNLOCK(a);
1907	ZONE_UNLOCK(b);
1908}
1909
1910int
1911uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1912{
1913	uma_klink_t klink;
1914	uma_klink_t kl;
1915	int error;
1916
1917	error = 0;
1918	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1919
1920	zone_lock_pair(zone, master);
1921	/*
1922	 * zone must use vtoslab() to resolve objects and must already be
1923	 * a secondary.
1924	 */
1925	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1926	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1927		error = EINVAL;
1928		goto out;
1929	}
1930	/*
1931	 * The new master must also use vtoslab().
1932	 */
1933	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1934		error = EINVAL;
1935		goto out;
1936	}
1937	/*
1938	 * Both must either be refcnt, or not be refcnt.
1939	 */
1940	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
1941	    (master->uz_flags & UMA_ZONE_REFCNT)) {
1942		error = EINVAL;
1943		goto out;
1944	}
1945	/*
1946	 * The underlying object must be the same size.  rsize
1947	 * may be different.
1948	 */
1949	if (master->uz_size != zone->uz_size) {
1950		error = E2BIG;
1951		goto out;
1952	}
1953	/*
1954	 * Put it at the end of the list.
1955	 */
1956	klink->kl_keg = zone_first_keg(master);
1957	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
1958		if (LIST_NEXT(kl, kl_link) == NULL) {
1959			LIST_INSERT_AFTER(kl, klink, kl_link);
1960			break;
1961		}
1962	}
1963	klink = NULL;
1964	zone->uz_flags |= UMA_ZFLAG_MULTI;
1965	zone->uz_slab = zone_fetch_slab_multi;
1966
1967out:
1968	zone_unlock_pair(zone, master);
1969	if (klink != NULL)
1970		free(klink, M_TEMP);
1971
1972	return (error);
1973}
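
/*
 * Illustrative sketch, not part of this file: attaching an additional
 * master keg to an existing secondary zone.  Both zone variables are
 * hypothetical; a non-zero return means the kegs were incompatible.
 *
 *	error = uma_zsecond_add(packet_zone, other_master_zone);
 */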
1974
1975
1976/* See uma.h */
1977void
1978uma_zdestroy(uma_zone_t zone)
1979{
1980
1981	zone_free_item(zones, zone, NULL, SKIP_NONE);
1982}
1983
1984/* See uma.h */
1985void *
1986uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1987{
1988	void *item;
1989	uma_cache_t cache;
1990	uma_bucket_t bucket;
1991	int lockfail;
1992	int cpu;
1993
1994	/* This is the fast-path allocation. */
1995#ifdef UMA_DEBUG_ALLOC_1
1996	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1997#endif
1998	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1999	    zone->uz_name, flags);
2000
2001	if (flags & M_WAITOK) {
2002		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2003		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2004	}
2005#ifdef DEBUG_MEMGUARD
2006	if (memguard_cmp_zone(zone)) {
2007		item = memguard_alloc(zone->uz_size, flags);
2008		if (item != NULL) {
2009			/*
2010			 * Avoid conflict with the use-after-free
2011			 * protecting infrastructure from INVARIANTS.
2012			 */
2013			if (zone->uz_init != NULL &&
2014			    zone->uz_init != mtrash_init &&
2015			    zone->uz_init(item, zone->uz_size, flags) != 0)
2016				return (NULL);
2017			if (zone->uz_ctor != NULL &&
2018			    zone->uz_ctor != mtrash_ctor &&
2019			    zone->uz_ctor(item, zone->uz_size, udata,
2020			    flags) != 0) {
2021				if (zone->uz_fini != NULL &&
				    zone->uz_fini != mtrash_fini)
					zone->uz_fini(item, zone->uz_size);
2022				return (NULL);
2023			}
2024			return (item);
2025		}
2026		/* This is unfortunate but should not be fatal. */
2027	}
2028#endif
2029	/*
2030	 * If possible, allocate from the per-CPU cache.  There are two
2031	 * requirements for safe access to the per-CPU cache: (1) the thread
2032	 * accessing the cache must not be preempted or yield during access,
2033	 * and (2) the thread must not migrate CPUs without switching which
2034	 * cache it accesses.  We rely on a critical section to prevent
2035	 * preemption and migration.  We release the critical section in
2036	 * order to acquire the zone mutex if we are unable to allocate from
2037	 * the current cache; when we re-acquire the critical section, we
2038	 * must detect and handle migration if it has occurred.
2039	 */
2040	critical_enter();
2041	cpu = curcpu;
2042	cache = &zone->uz_cpu[cpu];
2043
2044zalloc_start:
2045	bucket = cache->uc_allocbucket;
2046	if (bucket != NULL && bucket->ub_cnt > 0) {
2047		bucket->ub_cnt--;
2048		item = bucket->ub_bucket[bucket->ub_cnt];
2049#ifdef INVARIANTS
2050		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2051#endif
2052		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2053		cache->uc_allocs++;
2054		critical_exit();
2055		if (zone->uz_ctor != NULL &&
2056		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2057			atomic_add_long(&zone->uz_fails, 1);
2058			zone_free_item(zone, item, udata, SKIP_DTOR);
2059			return (NULL);
2060		}
2061#ifdef INVARIANTS
2062		uma_dbg_alloc(zone, NULL, item);
2063#endif
2064		if (flags & M_ZERO)
2065			bzero(item, zone->uz_size);
2066		return (item);
2067	}
2068
2069	/*
2070	 * We have run out of items in our alloc bucket.
2071	 * See if we can switch with our free bucket.
2072	 */
2073	bucket = cache->uc_freebucket;
2074	if (bucket != NULL && bucket->ub_cnt > 0) {
2075#ifdef UMA_DEBUG_ALLOC
2076		printf("uma_zalloc: Swapping empty alloc bucket for non-empty free bucket.\n");
2077#endif
2078		cache->uc_freebucket = cache->uc_allocbucket;
2079		cache->uc_allocbucket = bucket;
2080		goto zalloc_start;
2081	}
2082
2083	/*
2084	 * Discard any empty allocation bucket while we hold no locks.
2085	 */
2086	bucket = cache->uc_allocbucket;
2087	cache->uc_allocbucket = NULL;
2088	critical_exit();
2089	if (bucket != NULL)
2090		bucket_free(zone, bucket, udata);
2091
2092	/* Short-circuit for zones without buckets and low memory. */
2093	if (zone->uz_count == 0 || bucketdisable)
2094		goto zalloc_item;
2095
2096	/*
2097	 * The attempt to retrieve an item from the per-CPU cache has failed, so
2098	 * we must go back to the zone.  This requires the zone lock, so we
2099	 * must drop the critical section, then re-acquire it when we go back
2100	 * to the cache.  Since the critical section is released, we may be
2101	 * preempted or migrate.  As such, make sure not to maintain any
2102	 * thread-local state specific to the cache from prior to releasing
2103	 * the critical section.
2104	 */
2105	lockfail = 0;
2106	if (ZONE_TRYLOCK(zone) == 0) {
2107		/* Record contention to size the buckets. */
2108		ZONE_LOCK(zone);
2109		lockfail = 1;
2110	}
2111	critical_enter();
2112	cpu = curcpu;
2113	cache = &zone->uz_cpu[cpu];
2114
2115	/*
2116	 * Since we have locked the zone we may as well send back our stats.
2117	 */
2118	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2119	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2120	cache->uc_allocs = 0;
2121	cache->uc_frees = 0;
2122
2123	/* See if we lost the race to fill the cache. */
2124	if (cache->uc_allocbucket != NULL) {
2125		ZONE_UNLOCK(zone);
2126		goto zalloc_start;
2127	}
2128
2129	/*
2130	 * Check the zone's cache of buckets.
2131	 */
2132	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2133		KASSERT(bucket->ub_cnt != 0,
2134		    ("uma_zalloc_arg: Returning an empty bucket."));
2135
2136		LIST_REMOVE(bucket, ub_link);
2137		cache->uc_allocbucket = bucket;
2138		ZONE_UNLOCK(zone);
2139		goto zalloc_start;
2140	}
2141	/* We are no longer associated with this CPU. */
2142	critical_exit();
2143
2144	/*
2145	 * Bump the bucket size (uz_count) when lock contention indicates
2146	 * that the per-CPU caches are too small for the working set.
2147	 */
2148	if (lockfail && zone->uz_count < BUCKET_MAX)
2149		zone->uz_count++;
2150	ZONE_UNLOCK(zone);
2151
2152	/*
2153	 * Now let's just fill a bucket and put it on the free list.  If that
2154	 * works we'll restart the allocation from the beginning and it
2155	 * will use the just-filled bucket.
2156	 */
2157	bucket = zone_alloc_bucket(zone, udata, flags);
2158	if (bucket != NULL) {
2159		ZONE_LOCK(zone);
2160		critical_enter();
2161		cpu = curcpu;
2162		cache = &zone->uz_cpu[cpu];
2163		/*
2164		 * See if we lost the race or were migrated.  Cache the
2165		 * initialized bucket to make this less likely or claim
2166		 * the memory directly.
2167		 */
2168		if (cache->uc_allocbucket == NULL)
2169			cache->uc_allocbucket = bucket;
2170		else
2171			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2172		ZONE_UNLOCK(zone);
2173		goto zalloc_start;
2174	}
2175
2176	/*
2177	 * We may not be able to get a bucket so return an actual item.
2178	 */
2179#ifdef UMA_DEBUG
2180	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2181#endif
2182
2183zalloc_item:
2184	item = zone_alloc_item(zone, udata, flags);
2185
2186	return (item);
2187}
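
/*
 * Illustrative sketch, not part of this file: most callers reach this
 * function through the uma_zalloc() wrapper in uma.h, which supplies a
 * NULL udata.  The zone name is hypothetical.
 *
 *	item = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 */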
2188
2189static uma_slab_t
2190keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2191{
2192	uma_slab_t slab;
2193	int reserve;
2194
2195	mtx_assert(&keg->uk_lock, MA_OWNED);
2196	slab = NULL;
2197	reserve = 0;
2198	if ((flags & M_USE_RESERVE) == 0)
2199		reserve = keg->uk_reserve;
2200
2201	for (;;) {
2202		/*
2203		 * Find a slab with some space.  Prefer slabs that are partially
2204		 * used over those that are totally full.  This helps to reduce
2205		 * fragmentation.
2206		 */
2207		if (keg->uk_free > reserve) {
2208			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2209				slab = LIST_FIRST(&keg->uk_part_slab);
2210			} else {
2211				slab = LIST_FIRST(&keg->uk_free_slab);
2212				LIST_REMOVE(slab, us_link);
2213				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2214				    us_link);
2215			}
2216			MPASS(slab->us_keg == keg);
2217			return (slab);
2218		}
2219
2220		/*
2221		 * M_NOVM means don't ask at all!
2222		 */
2223		if (flags & M_NOVM)
2224			break;
2225
2226		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2227			keg->uk_flags |= UMA_ZFLAG_FULL;
2228			/*
2229			 * If this is not a multi-zone, set the FULL bit.
2230			 * Otherwise zone_fetch_slab_multi() takes care of it.
2231			 */
2232			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2233				zone->uz_flags |= UMA_ZFLAG_FULL;
2234				zone_log_warning(zone);
2235			}
2236			if (flags & M_NOWAIT)
2237				break;
2238			zone->uz_sleeps++;
2239			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2240			continue;
2241		}
2242		slab = keg_alloc_slab(keg, zone, flags);
2243		/*
2244		 * If we got a slab here it's safe to mark it partially used
2245		 * and return.  We assume that the caller is going to remove
2246		 * at least one item.
2247		 */
2248		if (slab) {
2249			MPASS(slab->us_keg == keg);
2250			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2251			return (slab);
2252		}
2253		/*
2254		 * We might not have been able to get a slab but another cpu
2255		 * could have while we were unlocked.  Check again before we
2256		 * fail.
2257		 */
2258		flags |= M_NOVM;
2259	}
2260	return (slab);
2261}
2262
2263static uma_slab_t
2264zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2265{
2266	uma_slab_t slab;
2267
2268	if (keg == NULL) {
2269		keg = zone_first_keg(zone);
2270		KEG_LOCK(keg);
2271	}
2272
2273	for (;;) {
2274		slab = keg_fetch_slab(keg, zone, flags);
2275		if (slab)
2276			return (slab);
2277		if (flags & (M_NOWAIT | M_NOVM))
2278			break;
2279	}
2280	KEG_UNLOCK(keg);
2281	return (NULL);
2282}
2283
2284/*
2285 * zone_fetch_slab_multi:  Fetches a slab from one of the zone's kegs.
2286 * Returns with that keg locked; when NULL is returned, no lock is held.
2287 *
2288 * The optional "last" keg seeds the search; it may be NULL.
2289 */
2290static uma_slab_t
2291zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2292{
2293	uma_klink_t klink;
2294	uma_slab_t slab;
2295	uma_keg_t keg;
2296	int flags;
2297	int empty;
2298	int full;
2299
2300	/*
2301	 * Don't wait on the first pass.  This will skip limit tests
2302	 * as well.  If any keg can satisfy the request without blocking,
2303	 * prefer it over sleeping on a full one.
2304	 */
2305	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2306	/*
2307	 * Use the last slab allocated as a hint for where to start
2308	 * the search.
2309	 */
2310	if (last != NULL) {
2311		slab = keg_fetch_slab(last, zone, flags);
2312		if (slab)
2313			return (slab);
2314		KEG_UNLOCK(last);
2315	}
2316	/*
2317	 * Loop until we have a slab, in case of transient failures
2318	 * while M_WAITOK is specified.  It is not clear this is strictly
2319	 * required, but it has been the behavior here for a long time.
2320	 */
2321	for (;;) {
2322		empty = 0;
2323		full = 0;
2324		/*
2325		 * Search the available kegs for slabs.  Be careful to hold the
2326		 * correct lock while calling into the keg layer.
2327		 */
2328		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2329			keg = klink->kl_keg;
2330			KEG_LOCK(keg);
2331			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2332				slab = keg_fetch_slab(keg, zone, flags);
2333				if (slab)
2334					return (slab);
2335			}
2336			if (keg->uk_flags & UMA_ZFLAG_FULL)
2337				full++;
2338			else
2339				empty++;
2340			KEG_UNLOCK(keg);
2341		}
2342		if (rflags & (M_NOWAIT | M_NOVM))
2343			break;
2344		flags = rflags;
2345		/*
2346		 * All kegs are full.  XXX We can't atomically check all kegs
2347		 * and sleep, so just sleep for a short period and retry.
2348		 */
2349		if (full && !empty) {
2350			ZONE_LOCK(zone);
2351			zone->uz_flags |= UMA_ZFLAG_FULL;
2352			zone->uz_sleeps++;
2353			zone_log_warning(zone);
2354			msleep(zone, zone->uz_lockptr, PVM,
2355			    "zonelimit", hz/100);
2356			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2357			ZONE_UNLOCK(zone);
2358			continue;
2359		}
2360	}
2361	return (NULL);
2362}
2363
2364static void *
2365slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2366{
2367	void *item;
2368	uint8_t freei;
2369
2370	MPASS(keg == slab->us_keg);
2371	mtx_assert(&keg->uk_lock, MA_OWNED);
2372
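	/*
	 * BIT_FFS() is 1-based and returns 0 when no bit is set, so the
	 * index of the first free item is the result minus one.  The
	 * caller guarantees that this slab has at least one free item.
	 */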
2373	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2374	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2375	item = slab->us_data + (keg->uk_rsize * freei);
2376	slab->us_freecount--;
2377	keg->uk_free--;
2378
2379	/* Move this slab to the full list */
2380	if (slab->us_freecount == 0) {
2381		LIST_REMOVE(slab, us_link);
2382		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2383	}
2384
2385	return (item);
2386}
2387
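/*
 * zone_import() serves as the uz_import hook for ordinary keg-backed
 * zones: it fills the caller's array with up to "max" items taken
 * straight from keg slabs and returns the number actually obtained.
 */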
2388static int
2389zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2390{
2391	uma_slab_t slab;
2392	uma_keg_t keg;
2393	int i;
2394
2395	slab = NULL;
2396	keg = NULL;
2397	/* Try to keep the buckets totally full */
2398	for (i = 0; i < max; ) {
2399		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2400			break;
2401		keg = slab->us_keg;
2402		while (slab->us_freecount && i < max) {
2403			bucket[i++] = slab_alloc_item(keg, slab);
2404			if (keg->uk_free <= keg->uk_reserve)
2405				break;
2406		}
2407		/* Don't grab more than one slab at a time. */
2408		flags &= ~M_WAITOK;
2409		flags |= M_NOWAIT;
2410	}
2411	if (slab != NULL)
2412		KEG_UNLOCK(keg);
2413
2414	return (i);
2415}
2416
2417static uma_bucket_t
2418zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2419{
2420	uma_bucket_t bucket;
2421	int max;
2422
2423	/* Don't wait for buckets, preserve caller's NOVM setting. */
2424	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2425	if (bucket == NULL)
2426		goto out;
2427
2428	max = MIN(bucket->ub_entries, zone->uz_count);
2429	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2430	    max, flags);
2431
2432	/*
2433	 * Initialize the memory if necessary.
2434	 */
2435	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2436		int i;
2437
2438		for (i = 0; i < bucket->ub_cnt; i++)
2439			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2440			    flags) != 0)
2441				break;
2442		/*
2443		 * If we couldn't initialize the whole bucket, put the
2444		 * rest back onto the freelist.
2445		 */
2446		if (i != bucket->ub_cnt) {
2447			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2448			    bucket->ub_cnt - i);
2449#ifdef INVARIANTS
2450			bzero(&bucket->ub_bucket[i],
2451			    sizeof(void *) * (bucket->ub_cnt - i));
2452#endif
2453			bucket->ub_cnt = i;
2454		}
2455	}
2456
2457out:
2458	if (bucket == NULL || bucket->ub_cnt == 0) {
2459		if (bucket != NULL)
2460			bucket_free(zone, bucket, udata);
2461		atomic_add_long(&zone->uz_fails, 1);
2462		return (NULL);
2463	}
2464
2465	return (bucket);
2466}
2467
2468/*
2469 * Allocates a single item from a zone.
2470 *
2471 * Arguments
2472 *	zone   The zone to alloc for.
2473 *	udata  The data to be passed to the constructor.
2474 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2475 *
2476 * Returns
2477 *	NULL if there is no memory and M_NOWAIT is set
2478 *	An item if successful
2479 */
2480
2481static void *
2482zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2483{
2484	void *item;
2485
2486	item = NULL;
2487
2488#ifdef UMA_DEBUG_ALLOC
2489	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2490#endif
2491	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2492		goto fail;
2493	atomic_add_long(&zone->uz_allocs, 1);
2494
2495	/*
2496	 * We have to call both the zone's init (not the keg's init)
2497	 * and the zone's ctor.  This is because the item is going from
2498	 * a keg slab directly to the user, and the user is expecting it
2499	 * to be both zone-init'd as well as zone-ctor'd.
2500	 */
2501	if (zone->uz_init != NULL) {
2502		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2503			zone_free_item(zone, item, udata, SKIP_FINI);
2504			goto fail;
2505		}
2506	}
2507	if (zone->uz_ctor != NULL) {
2508		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2509			zone_free_item(zone, item, udata, SKIP_DTOR);
2510			goto fail;
2511		}
2512	}
2513#ifdef INVARIANTS
2514	uma_dbg_alloc(zone, NULL, item);
2515#endif
2516	if (flags & M_ZERO)
2517		bzero(item, zone->uz_size);
2518
2519	return (item);
2520
2521fail:
2522	atomic_add_long(&zone->uz_fails, 1);
2523	return (NULL);
2524}
2525
2526/* See uma.h */
2527void
2528uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2529{
2530	uma_cache_t cache;
2531	uma_bucket_t bucket;
2532	int cpu;
2533
2534#ifdef UMA_DEBUG_ALLOC_1
2535	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2536#endif
2537	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2538	    zone->uz_name);
2539
2540	/* uma_zfree(..., NULL) does nothing, to match free(9). */
2541	if (item == NULL)
2542		return;
2543#ifdef DEBUG_MEMGUARD
2544	if (is_memguard_addr(item)) {
2545		if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2546			zone->uz_dtor(item, zone->uz_size, udata);
2547		if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2548			zone->uz_fini(item, zone->uz_size);
2549		memguard_free(item);
2550		return;
2551	}
2552#endif
2553#ifdef INVARIANTS
2554	if (zone->uz_flags & UMA_ZONE_MALLOC)
2555		uma_dbg_free(zone, udata, item);
2556	else
2557		uma_dbg_free(zone, NULL, item);
2558#endif
2559	if (zone->uz_dtor != NULL)
2560		zone->uz_dtor(item, zone->uz_size, udata);
2561
2562	/*
2563	 * The race here is acceptable.  If we miss it we'll just have to wait
2564	 * a little longer for the limits to be reset.
2565	 */
2566	if (zone->uz_flags & UMA_ZFLAG_FULL)
2567		goto zfree_item;
2568
2569	/*
2570	 * If possible, free to the per-CPU cache.  There are two
2571	 * requirements for safe access to the per-CPU cache: (1) the thread
2572	 * accessing the cache must not be preempted or yield during access,
2573	 * and (2) the thread must not migrate CPUs without switching which
2574	 * cache it accesses.  We rely on a critical section to prevent
2575	 * preemption and migration.  We release the critical section in
2576	 * order to acquire the zone mutex if we are unable to free to the
2577	 * current cache; when we re-acquire the critical section, we must
2578	 * detect and handle migration if it has occurred.
2579	 */
2580zfree_restart:
2581	critical_enter();
2582	cpu = curcpu;
2583	cache = &zone->uz_cpu[cpu];
2584
2585zfree_start:
2586	/*
2587	 * Try to free into the allocbucket first to give LIFO ordering
2588	 * for cache-hot data structures.  Spill over into the freebucket
2589	 * if necessary.  Alloc will swap them if one runs dry.
2590	 */
2591	bucket = cache->uc_allocbucket;
2592	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2593		bucket = cache->uc_freebucket;
2594	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2595		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2596		    ("uma_zfree: Freeing to non free bucket index."));
2597		bucket->ub_bucket[bucket->ub_cnt] = item;
2598		bucket->ub_cnt++;
2599		cache->uc_frees++;
2600		critical_exit();
2601		return;
2602	}
2603
2604	/*
2605	 * We must go back to the zone, which requires acquiring the zone lock,
2606	 * which in turn means we must release and re-acquire the critical
2607	 * section.  Since the critical section is released, we may be
2608	 * preempted or migrate.  As such, make sure not to maintain any
2609	 * thread-local state specific to the cache from prior to releasing
2610	 * the critical section.
2611	 */
2612	critical_exit();
2613	if (zone->uz_count == 0 || bucketdisable)
2614		goto zfree_item;
2615
2616	ZONE_LOCK(zone);
2617	critical_enter();
2618	cpu = curcpu;
2619	cache = &zone->uz_cpu[cpu];
2620
2621	/*
2622	 * Since we have locked the zone we may as well send back our stats.
2623	 */
2624	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2625	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2626	cache->uc_allocs = 0;
2627	cache->uc_frees = 0;
2628
2629	bucket = cache->uc_freebucket;
2630	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2631		ZONE_UNLOCK(zone);
2632		goto zfree_start;
2633	}
2634	cache->uc_freebucket = NULL;
2635
2636	/* Can we throw this on the zone full list? */
2637	if (bucket != NULL) {
2638#ifdef UMA_DEBUG_ALLOC
2639		printf("uma_zfree: Putting old bucket on the free list.\n");
2640#endif
2641		/* ub_cnt holds the number of items in the bucket. */
2642		KASSERT(bucket->ub_cnt != 0,
2643		    ("uma_zfree: Attempting to insert an empty bucket onto the full list."));
2644		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2645	}
2646
2647	/* We are no longer associated with this CPU. */
2648	critical_exit();
2649
2650	/* And the zone.. */
2651	ZONE_UNLOCK(zone);
2652
2653#ifdef UMA_DEBUG_ALLOC
2654	printf("uma_zfree: Allocating new free bucket.\n");
2655#endif
2656	bucket = bucket_alloc(zone, udata, M_NOWAIT);
2657	if (bucket) {
2658		critical_enter();
2659		cpu = curcpu;
2660		cache = &zone->uz_cpu[cpu];
2661		if (cache->uc_freebucket == NULL) {
2662			cache->uc_freebucket = bucket;
2663			goto zfree_start;
2664		}
2665		/*
2666		 * We lost the race, start over.  We have to drop our
2667		 * critical section to free the bucket.
2668		 */
2669		critical_exit();
2670		bucket_free(zone, bucket, udata);
2671		goto zfree_restart;
2672	}
2673
2674	/*
2675	 * If nothing else caught this, we'll just do an internal free.
2676	 */
2677zfree_item:
2678	zone_free_item(zone, item, udata, SKIP_DTOR);
2679
2680	return;
2681}
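
/*
 * Illustrative sketch, not part of this file: most callers reach this
 * function through the uma_zfree() wrapper in uma.h, which supplies a
 * NULL udata.  The zone name is hypothetical; passing a NULL item is a
 * harmless no-op, matching free(9).
 *
 *	uma_zfree(foo_zone, item);
 */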
2682
2683static void
2684slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2685{
2686	uint8_t freei;
2687
2688	mtx_assert(&keg->uk_lock, MA_OWNED);
2689	MPASS(keg == slab->us_keg);
2690
2691	/* Do we need to remove from any lists? */
2692	if (slab->us_freecount + 1 == keg->uk_ipers) {
2693		LIST_REMOVE(slab, us_link);
2694		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2695	} else if (slab->us_freecount == 0) {
2696		LIST_REMOVE(slab, us_link);
2697		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2698	}
2699
2700	/* Slab management. */
2701	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2702	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2703	slab->us_freecount++;
2704
2705	/* Keg statistics. */
2706	keg->uk_free++;
2707}
2708
2709static void
2710zone_release(uma_zone_t zone, void **bucket, int cnt)
2711{
2712	void *item;
2713	uma_slab_t slab;
2714	uma_keg_t keg;
2715	uint8_t *mem;
2716	int clearfull;
2717	int i;
2718
2719	clearfull = 0;
2720	keg = zone_first_keg(zone);
2721	KEG_LOCK(keg);
2722	for (i = 0; i < cnt; i++) {
2723		item = bucket[i];
2724		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2725			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2726			if (zone->uz_flags & UMA_ZONE_HASH) {
2727				slab = hash_sfind(&keg->uk_hash, mem);
2728			} else {
2729				mem += keg->uk_pgoff;
2730				slab = (uma_slab_t)mem;
2731			}
2732		} else {
2733			slab = vtoslab((vm_offset_t)item);
2734			if (slab->us_keg != keg) {
2735				KEG_UNLOCK(keg);
2736				keg = slab->us_keg;
2737				KEG_LOCK(keg);
2738			}
2739		}
2740		slab_free_item(keg, slab, item);
2741		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2742			if (keg->uk_pages < keg->uk_maxpages) {
2743				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2744				clearfull = 1;
2745			}
2746
2747			/*
2748			 * We can handle one more allocation. Since we're
2749			 * clearing ZFLAG_FULL, wake up all procs blocked
2750			 * on pages. This should be uncommon, so keeping this
2751			 * simple for now (rather than adding count of blocked
2752			 * threads etc).
2753			 */
2754			wakeup(keg);
2755		}
2756	}
2757	KEG_UNLOCK(keg);
2758	if (clearfull) {
2759		ZONE_LOCK(zone);
2760		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2761		wakeup(zone);
2762		ZONE_UNLOCK(zone);
2763	}
2765}
2766
2767/*
2768 * Frees a single item to any zone.
2769 *
2770 * Arguments:
2771 *	zone   The zone to free to
2772 *	item   The item we're freeing
2773 *	udata  User supplied data for the dtor
2774 *	skip   Skip dtors and finis
2775 */
2776static void
2777zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2778{
2779
2780#ifdef INVARIANTS
2781	if (skip == SKIP_NONE) {
2782		if (zone->uz_flags & UMA_ZONE_MALLOC)
2783			uma_dbg_free(zone, udata, item);
2784		else
2785			uma_dbg_free(zone, NULL, item);
2786	}
2787#endif
2788	if (skip < SKIP_DTOR && zone->uz_dtor)
2789		zone->uz_dtor(item, zone->uz_size, udata);
2790
2791	if (skip < SKIP_FINI && zone->uz_fini)
2792		zone->uz_fini(item, zone->uz_size);
2793
2794	atomic_add_long(&zone->uz_frees, 1);
2795	zone->uz_release(zone->uz_arg, &item, 1);
2796}
2797
2798/* See uma.h */
2799int
2800uma_zone_set_max(uma_zone_t zone, int nitems)
2801{
2802	uma_keg_t keg;
2803
2804	keg = zone_first_keg(zone);
2805	if (keg == NULL)
2806		return (0);
2807	KEG_LOCK(keg);
2808	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2809	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2810		keg->uk_maxpages += keg->uk_ppera;
2811	nitems = keg->uk_maxpages * keg->uk_ipers;
2812	KEG_UNLOCK(keg);
2813
2814	return (nitems);
2815}
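
/*
 * Illustrative sketch, not part of this file: because the cap is stored
 * as whole slabs, the effective limit may exceed the request, so callers
 * should use the return value.  The zone name is hypothetical.
 *
 *	real_max = uma_zone_set_max(foo_zone, 1000);
 */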
2816
2817/* See uma.h */
2818int
2819uma_zone_get_max(uma_zone_t zone)
2820{
2821	int nitems;
2822	uma_keg_t keg;
2823
2824	keg = zone_first_keg(zone);
2825	if (keg == NULL)
2826		return (0);
2827	KEG_LOCK(keg);
2828	nitems = keg->uk_maxpages * keg->uk_ipers;
2829	KEG_UNLOCK(keg);
2830
2831	return (nitems);
2832}
2833
2834/* See uma.h */
2835void
2836uma_zone_set_warning(uma_zone_t zone, const char *warning)
2837{
2838
2839	ZONE_LOCK(zone);
2840	zone->uz_warning = warning;
2841	ZONE_UNLOCK(zone);
2842}
2843
2844/* See uma.h */
2845int
2846uma_zone_get_cur(uma_zone_t zone)
2847{
2848	int64_t nitems;
2849	u_int i;
2850
2851	ZONE_LOCK(zone);
2852	nitems = zone->uz_allocs - zone->uz_frees;
2853	CPU_FOREACH(i) {
2854		/*
2855		 * See the comment in sysctl_vm_zone_stats() regarding the
2856		 * safety of accessing the per-cpu caches. With the zone lock
2857		 * held, it is safe, but can potentially result in stale data.
2858		 */
2859		nitems += zone->uz_cpu[i].uc_allocs -
2860		    zone->uz_cpu[i].uc_frees;
2861	}
2862	ZONE_UNLOCK(zone);
2863
2864	return (nitems < 0 ? 0 : nitems);
2865}
2866
2867/* See uma.h */
2868void
2869uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2870{
2871	uma_keg_t keg;
2872
2873	keg = zone_first_keg(zone);
2874	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2875	KEG_LOCK(keg);
2876	KASSERT(keg->uk_pages == 0,
2877	    ("uma_zone_set_init on non-empty keg"));
2878	keg->uk_init = uminit;
2879	KEG_UNLOCK(keg);
2880}
2881
2882/* See uma.h */
2883void
2884uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2885{
2886	uma_keg_t keg;
2887
2888	keg = zone_first_keg(zone);
2889	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
2890	KEG_LOCK(keg);
2891	KASSERT(keg->uk_pages == 0,
2892	    ("uma_zone_set_fini on non-empty keg"));
2893	keg->uk_fini = fini;
2894	KEG_UNLOCK(keg);
2895}
2896
2897/* See uma.h */
2898void
2899uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2900{
2901
2902	ZONE_LOCK(zone);
2903	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2904	    ("uma_zone_set_zinit on non-empty keg"));
2905	zone->uz_init = zinit;
2906	ZONE_UNLOCK(zone);
2907}
2908
2909/* See uma.h */
2910void
2911uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2912{
2913
2914	ZONE_LOCK(zone);
2915	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2916	    ("uma_zone_set_zfini on non-empty keg"));
2917	zone->uz_fini = zfini;
2918	ZONE_UNLOCK(zone);
2919}
2920
2921/* See uma.h */
2922/* XXX uk_freef is not actually used with the zone locked */
2923void
2924uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2925{
2926	uma_keg_t keg;
2927
2928	keg = zone_first_keg(zone);
2929	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
2930	KEG_LOCK(keg);
2931	keg->uk_freef = freef;
2932	KEG_UNLOCK(keg);
2933}
2934
2935/* See uma.h */
2936/* XXX uk_allocf is not actually used with the zone locked */
2937void
2938uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2939{
2940	uma_keg_t keg;
2941
2942	keg = zone_first_keg(zone);
2943	KEG_LOCK(keg);
2944	keg->uk_allocf = allocf;
2945	KEG_UNLOCK(keg);
2946}
2947
2948/* See uma.h */
2949void
2950uma_zone_reserve(uma_zone_t zone, int items)
2951{
2952	uma_keg_t keg;
2953
2954	keg = zone_first_keg(zone);
2955	if (keg == NULL)
2956		return;
2957	KEG_LOCK(keg);
2958	keg->uk_reserve = items;
2959	KEG_UNLOCK(keg);
2962}
2963
2964/* See uma.h */
2965int
2966uma_zone_reserve_kva(uma_zone_t zone, int count)
2967{
2968	uma_keg_t keg;
2969	vm_offset_t kva;
2970	int pages;
2971
2972	keg = zone_first_keg(zone);
2973	if (keg == NULL)
2974		return (0);
2975	pages = count / keg->uk_ipers;
2976
2977	if (pages * keg->uk_ipers < count)
2978		pages++;
2979
2980#ifdef UMA_MD_SMALL_ALLOC
2981	if (keg->uk_ppera > 1) {
2982#else
2983	if (1) {
2984#endif
2985		kva = kva_alloc(pages * UMA_SLAB_SIZE);
2986		if (kva == 0)
2987			return (0);
2988	} else
2989		kva = 0;
2990	KEG_LOCK(keg);
2991	keg->uk_kva = kva;
2992	keg->uk_offset = 0;
2993	keg->uk_maxpages = pages;
2994#ifdef UMA_MD_SMALL_ALLOC
2995	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
2996#else
2997	keg->uk_allocf = noobj_alloc;
2998#endif
2999	keg->uk_flags |= UMA_ZONE_NOFREE;
3000	KEG_UNLOCK(keg);
3001
3002	return (1);
3003}
3004
3005/* See uma.h */
3006void
3007uma_prealloc(uma_zone_t zone, int items)
3008{
3009	int slabs;
3010	uma_slab_t slab;
3011	uma_keg_t keg;
3012
3013	keg = zone_first_keg(zone);
3014	if (keg == NULL)
3015		return;
3016	KEG_LOCK(keg);
3017	slabs = items / keg->uk_ipers;
3018	if (slabs * keg->uk_ipers < items)
3019		slabs++;
3020	while (slabs > 0) {
3021		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3022		if (slab == NULL)
3023			break;
3024		MPASS(slab->us_keg == keg);
3025		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3026		slabs--;
3027	}
3028	KEG_UNLOCK(keg);
3029}
3030
3031/* See uma.h */
3032uint32_t *
3033uma_find_refcnt(uma_zone_t zone, void *item)
3034{
3035	uma_slabrefcnt_t slabref;
3036	uma_slab_t slab;
3037	uma_keg_t keg;
3038	uint32_t *refcnt;
3039	int idx;
3040
3041	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
3042	slabref = (uma_slabrefcnt_t)slab;
3043	keg = slab->us_keg;
3044	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
3045	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3046	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3047	refcnt = &slabref->us_refcnt[idx];
3048	return refcnt;
3049	return (refcnt);
3050
3051/* See uma.h */
3052void
3053uma_reclaim(void)
3054{
3055#ifdef UMA_DEBUG
3056	printf("UMA: vm asked us to release pages!\n");
3057#endif
3058	bucket_enable();
3059	zone_foreach(zone_drain);
3060	/*
3061	 * Some slabs may have been freed into the slab and bucket zones
3062	 * after those zones were visited above, so drain them again to
3063	 * release any pages that have since become empty.
3064	 */
3065	zone_drain(slabzone);
3066	zone_drain(slabrefzone);
3067	bucket_zone_drain();
3068}
3069
3070/* See uma.h */
3071int
3072uma_zone_exhausted(uma_zone_t zone)
3073{
3074	int full;
3075
3076	ZONE_LOCK(zone);
3077	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3078	ZONE_UNLOCK(zone);
3079	return (full);
3080}
3081
3082int
3083uma_zone_exhausted_nolock(uma_zone_t zone)
3084{
3085	return (zone->uz_flags & UMA_ZFLAG_FULL);
3086}
3087
3088void *
3089uma_large_malloc(int size, int wait)
3090{
3091	void *mem;
3092	uma_slab_t slab;
3093	uint8_t flags;
3094
3095	slab = zone_alloc_item(slabzone, NULL, wait);
3096	if (slab == NULL)
3097		return (NULL);
3098	mem = page_alloc(NULL, size, &flags, wait);
3099	if (mem) {
3100		vsetslab((vm_offset_t)mem, slab);
3101		slab->us_data = mem;
3102		slab->us_flags = flags | UMA_SLAB_MALLOC;
3103		slab->us_size = size;
3104	} else {
3105		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3106	}
3107
3108	return (mem);
3109}
3110
3111void
3112uma_large_free(uma_slab_t slab)
3113{
3114
3115	page_free(slab->us_data, slab->us_size, slab->us_flags);
3116	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3117}
3118
3119void
3120uma_print_stats(void)
3121{
3122	zone_foreach(uma_print_zone);
3123}
3124
3125static void
3126slab_print(uma_slab_t slab)
3127{
3128	printf("slab: keg %p, data %p, freecount %d\n",
3129		slab->us_keg, slab->us_data, slab->us_freecount);
3130}
3131
3132static void
3133cache_print(uma_cache_t cache)
3134{
3135	printf("alloc: %p(%d), free: %p(%d)\n",
3136		cache->uc_allocbucket,
3137		cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
3138		cache->uc_freebucket,
3139		cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
3140}
3141
3142static void
3143uma_print_keg(uma_keg_t keg)
3144{
3145	uma_slab_t slab;
3146
3147	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3148	    "out %d free %d limit %d\n",
3149	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3150	    keg->uk_ipers, keg->uk_ppera,
3151	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3152	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3153	printf("Part slabs:\n");
3154	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3155		slab_print(slab);
3156	printf("Free slabs:\n");
3157	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3158		slab_print(slab);
3159	printf("Full slabs:\n");
3160	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3161		slab_print(slab);
3162}
3163
3164void
3165uma_print_zone(uma_zone_t zone)
3166{
3167	uma_cache_t cache;
3168	uma_klink_t kl;
3169	int i;
3170
3171	printf("zone: %s(%p) size %d flags %#x\n",
3172	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3173	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3174		uma_print_keg(kl->kl_keg);
3175	CPU_FOREACH(i) {
3176		cache = &zone->uz_cpu[i];
3177		printf("CPU %d Cache:\n", i);
3178		cache_print(cache);
3179	}
3180}
3181
3182#ifdef DDB
3183/*
3184 * Generate statistics across both the zone and its per-cpu caches.
3185 * Each statistic is returned via its pointer argument when non-NULL.
3186 *
3187 * Note: does not update the zone statistics, as it can't safely clear the
3188 * per-CPU cache statistic.
3189 *
3190 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3191 * safe from off-CPU; we should modify the caches to track this information
3192 * directly so that we don't have to.
3193 */
3194static void
3195uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3196    uint64_t *freesp, uint64_t *sleepsp)
3197{
3198	uma_cache_t cache;
3199	uint64_t allocs, frees, sleeps;
3200	int cachefree, cpu;
3201
3202	allocs = frees = sleeps = 0;
3203	cachefree = 0;
3204	CPU_FOREACH(cpu) {
3205		cache = &z->uz_cpu[cpu];
3206		if (cache->uc_allocbucket != NULL)
3207			cachefree += cache->uc_allocbucket->ub_cnt;
3208		if (cache->uc_freebucket != NULL)
3209			cachefree += cache->uc_freebucket->ub_cnt;
3210		allocs += cache->uc_allocs;
3211		frees += cache->uc_frees;
3212	}
3213	allocs += z->uz_allocs;
3214	frees += z->uz_frees;
3215	sleeps += z->uz_sleeps;
3216	if (cachefreep != NULL)
3217		*cachefreep = cachefree;
3218	if (allocsp != NULL)
3219		*allocsp = allocs;
3220	if (freesp != NULL)
3221		*freesp = frees;
3222	if (sleepsp != NULL)
3223		*sleepsp = sleeps;
3224}
3225#endif /* DDB */
3226
3227static int
3228sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3229{
3230	uma_keg_t kz;
3231	uma_zone_t z;
3232	int count;
3233
3234	count = 0;
3235	mtx_lock(&uma_mtx);
3236	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3237		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3238			count++;
3239	}
3240	mtx_unlock(&uma_mtx);
3241	return (sysctl_handle_int(oidp, &count, 0, req));
3242}
3243
3244static int
3245sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3246{
3247	struct uma_stream_header ush;
3248	struct uma_type_header uth;
3249	struct uma_percpu_stat ups;
3250	uma_bucket_t bucket;
3251	struct sbuf sbuf;
3252	uma_cache_t cache;
3253	uma_klink_t kl;
3254	uma_keg_t kz;
3255	uma_zone_t z;
3256	uma_keg_t k;
3257	int count, error, i;
3258
3259	error = sysctl_wire_old_buffer(req, 0);
3260	if (error != 0)
3261		return (error);
3262	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3263
3264	count = 0;
3265	mtx_lock(&uma_mtx);
3266	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3267		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3268			count++;
3269	}
3270
3271	/*
3272	 * Insert stream header.
3273	 */
3274	bzero(&ush, sizeof(ush));
3275	ush.ush_version = UMA_STREAM_VERSION;
3276	ush.ush_maxcpus = (mp_maxid + 1);
3277	ush.ush_count = count;
3278	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3279
3280	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3281		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3282			bzero(&uth, sizeof(uth));
3283			ZONE_LOCK(z);
3284			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3285			uth.uth_align = kz->uk_align;
3286			uth.uth_size = kz->uk_size;
3287			uth.uth_rsize = kz->uk_rsize;
3288			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3289				k = kl->kl_keg;
3290				uth.uth_maxpages += k->uk_maxpages;
3291				uth.uth_pages += k->uk_pages;
3292				uth.uth_keg_free += k->uk_free;
3293				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3294				    * k->uk_ipers;
3295			}
3296
3297			/*
3298			 * A zone is secondary if it is not the first entry
3299			 * on the keg's zone list.
3300			 */
3301			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3302			    (LIST_FIRST(&kz->uk_zones) != z))
3303				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3304
3305			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3306				uth.uth_zone_free += bucket->ub_cnt;
3307			uth.uth_allocs = z->uz_allocs;
3308			uth.uth_frees = z->uz_frees;
3309			uth.uth_fails = z->uz_fails;
3310			uth.uth_sleeps = z->uz_sleeps;
3311			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3312			/*
3313			 * While it is not normally safe to access the cache
3314			 * bucket pointers while not on the CPU that owns the
3315			 * cache, we only allow the pointers to be exchanged
3316			 * without the zone lock held, not invalidated, so
3317			 * accept the possible race associated with bucket
3318			 * exchange during monitoring.
3319			 */
3320			for (i = 0; i < (mp_maxid + 1); i++) {
3321				bzero(&ups, sizeof(ups));
3322				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3323					goto skip;
3324				if (CPU_ABSENT(i))
3325					goto skip;
3326				cache = &z->uz_cpu[i];
3327				if (cache->uc_allocbucket != NULL)
3328					ups.ups_cache_free +=
3329					    cache->uc_allocbucket->ub_cnt;
3330				if (cache->uc_freebucket != NULL)
3331					ups.ups_cache_free +=
3332					    cache->uc_freebucket->ub_cnt;
3333				ups.ups_allocs = cache->uc_allocs;
3334				ups.ups_frees = cache->uc_frees;
3335skip:
3336				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3337			}
3338			ZONE_UNLOCK(z);
3339		}
3340	}
3341	mtx_unlock(&uma_mtx);
3342	error = sbuf_finish(&sbuf);
3343	sbuf_delete(&sbuf);
3344	return (error);
3345}
3346
3347#ifdef DDB
3348DB_SHOW_COMMAND(uma, db_show_uma)
3349{
3350	uint64_t allocs, frees, sleeps;
3351	uma_bucket_t bucket;
3352	uma_keg_t kz;
3353	uma_zone_t z;
3354	int cachefree;
3355
3356	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3357	    "Requests", "Sleeps");
3358	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3359		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3360			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3361				allocs = z->uz_allocs;
3362				frees = z->uz_frees;
3363				sleeps = z->uz_sleeps;
3364				cachefree = 0;
3365			} else
3366				uma_zone_sumstat(z, &cachefree, &allocs,
3367				    &frees, &sleeps);
3368			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3369			    (LIST_FIRST(&kz->uk_zones) != z)))
3370				cachefree += kz->uk_free;
3371			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3372				cachefree += bucket->ub_cnt;
3373			db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
3374			    (uintmax_t)kz->uk_size,
3375			    (intmax_t)(allocs - frees), cachefree,
3376			    (uintmax_t)allocs, sleeps);
3377			if (db_pager_quit)
3378				return;
3379		}
3380	}
3381}
3382#endif
3383