/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
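
/*
 * A minimal malloc(9) usage sketch (illustrative only; "M_FOO" and
 * "struct foo_softc" are hypothetical):
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "foo driver state");
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOO);
 */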

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

#if defined(KASAN) || defined(DEBUG_REDZONE)
#define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
#define	DEBUG_REDZONE_ARG	, osize
#else
#define	DEBUG_REDZONE_ARG_DEF
#define	DEBUG_REDZONE_ARG
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
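
/*
 * For example, with the default REALLOC_FRACTION of 1, shrinking a
 * 1024-byte block with realloc() moves the data to a smaller block only
 * when the new size is <= 1024 / 2^1 = 512 bytes; above that, the
 * original block is reused.
 */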

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * Warning: the layout of the struct is duplicated in libmemstat for KVM
 * support.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	const char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "malloc-16", },
	{32, "malloc-32", },
	{64, "malloc-64", },
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};
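
/*
 * For example, a malloc(9) request for 100 bytes is rounded up to 112 (the
 * next KMEM_ZBASE multiple), and kmemsize[112 >> KMEM_ZSHIFT] selects the
 * "malloc-128" zone above, so the caller actually receives 128 bytes.
 */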

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size for which malloc(9) uses UMA as the backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Malloc information");

static u_int vm_malloc_zone_count = nitems(kmemzones);
SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
    CTLFLAG_RD, &vm_malloc_zone_count, 0,
    "Number of malloc zones");

static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
{
	int sizes[nitems(kmemzones)];
	int i;

	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones", &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}
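
/*
 * For example (hypothetical numbers): with zone_offset = 1400097 and the
 * short description "devbuf" (length 6), the character examined is
 * desc[1400097 % 6] = desc[3] = 'b', so the subzone chosen is
 * 'b' % numzones.
 */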

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}
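
/*
 * A minimal contigmalloc(9) usage sketch (illustrative only; the DMA
 * constraints shown are hypothetical): allocate 64KB of physically
 * contiguous memory below 4GB, page-aligned, with no boundary crossing
 * restriction:
 *
 *	void *buf;
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffffUL,
 *	    PAGE_SIZE, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */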

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 *	contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(addr, size);
	malloc_type_freed(type, round_page(size));
}

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("malloc: bad malloc type version"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(0,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * Handle large allocations and frees by using kmem_malloc directly.
 */
static inline bool
malloc_large_slab(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return ((va & 1) != 0);
}

static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return (va >> 1);
}
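
/*
 * For example, a 65536-byte large allocation stores (65536 << 1) | 1 =
 * 0x20001 in the slab pointer slot.  malloc_large_slab() sees the set low
 * bit (always clear in a real slab pointer) and malloc_large_size()
 * recovers 0x20001 >> 1 = 65536.
 */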

static caddr_t __noinline
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
    int flags DEBUG_REDZONE_ARG_DEF)
{
	void *va;

	size = roundup(size, PAGE_SIZE);
	va = kmem_malloc_domainset(policy, size, flags);
	if (va != NULL) {
		/* The low bit is unused for slab pointers. */
		vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
		uma_total_inc(size);
	}
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	} else {
#ifdef DEBUG_REDZONE
		va = redzone_setup(va, osize);
#endif
		kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
	}
	return (va);
}

static void
free_large(void *addr, size_t size)
{

	kmem_free(addr, size);
	uma_total_dec(size);
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_arg(zone, zone, flags);
	if (va != NULL) {
		size = zone->uz_size;
		if ((flags & M_ZERO) == 0) {
			kmsan_mark(va, size, KMSAN_STATE_UNINIT);
			kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
		}
	}
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination."));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_domain(zone, zone, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;
#if defined(KASAN) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		va = malloc_domain(&size, &indx, mtp, domain, flags);
	} while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
#ifdef KMSAN
	if ((flags & M_ZERO) == 0) {
		kmsan_mark(va, size, KMSAN_STATE_UNINIT);
		kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
	}
#endif
	return (va);
}

/*
 * Allocate an executable area.
 */
void *
malloc_exec(size_t size, struct malloc_type *mtp, int flags)
{

	return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
}

void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
	caddr_t va;
#endif

	flags |= M_EXEC;

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
}

void *
malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
{
	return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
	    flags));
}

void *
malloc_domainset_aligned(size_t size, size_t align,
    struct malloc_type *mtp, struct domainset *ds, int flags)
{
	void *res;
	size_t asize;

	KASSERT(powerof2(align),
	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
	    align, size));
	KASSERT(align <= PAGE_SIZE,
	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
	    align, size));

	/*
	 * Round the allocation size up to the next power of 2,
	 * because we can only guarantee alignment for
	 * power-of-2-sized allocations.  Further increase the
	 * allocation size to align if the rounded size is less than
	 * align, since malloc zones provide alignment equal to their
	 * size.
	 */
	if (size == 0)
		size = 1;
	asize = size <= align ? align : 1UL << flsl(size - 1);

	res = malloc_domainset(asize, mtp, ds, flags);
	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
	    "allocsize %#zx align %#zx", res, size, asize, align));
	return (res);
}
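
/*
 * For example, malloc_domainset_aligned() with size = 3000 and align = 64
 * computes asize = 1UL << flsl(2999) = 4096; the power-of-2 4096-byte zone
 * returns 4096-byte-aligned memory, which more than satisfies the requested
 * 64-byte alignment.
 */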

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
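
/*
 * A minimal mallocarray(9) usage sketch (illustrative; "n" and "struct
 * entry" are hypothetical):
 *
 *	struct entry *tbl;
 *
 *	tbl = mallocarray(n, sizeof(*tbl), M_TEMP, M_WAITOK | M_ZERO);
 *
 * Unlike a bare malloc(n * sizeof(*tbl), ...), the multiplication is
 * overflow-checked and panics rather than silently under-allocating.
 */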

void *
mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
    struct domainset *ds, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);

	return (malloc_domainset(size * nmemb, type, ds, flags));
}

#if defined(INVARIANTS) && !defined(KASAN)
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes for
	 * 64 bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	zfree:
 *
 *	Zero then free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("zfree: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		kasan_mark(addr, size, size, 0);
		explicit_bzero(addr, size);
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		kasan_mark(addr, size, size, 0);
		explicit_bzero(addr, size);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("realloc: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	alloc = redzone_get_size(addr);
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!malloc_large_slab(slab))
		alloc = zone->uz_size;
	else
		alloc = malloc_large_size(slab);

	/* Reuse the original block if appropriate */
	if (size <= alloc &&
	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
		kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
		return (addr);
	}
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/*
	 * Copy over original contents.  For KASAN, the redzone must be marked
	 * valid before performing the copy.
	 */
	kasan_mark(addr, alloc, alloc, 0);
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}

/*
 *	malloc_size: returns the number of bytes allocated for a request of
 *		     the specified size
 */
size_t
malloc_size(size_t size)
{
	int indx;

	if (size > kmem_zmax)
		return (0);
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	return (kmemzones[indx].kz_size);
}
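
/*
 * For example, malloc_size(100) returns 128: the request rounds up to 112
 * (the next KMEM_ZBASE multiple) and kmemsize[] maps it to the "malloc-128"
 * bucket.  Sizes above kmem_zmax return 0, since large allocations bypass
 * the buckets entirely.
 */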

/*
 *	malloc_usable_size: returns the usable size of the allocation.
 */
size_t
malloc_usable_size(const void *addr)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	u_long size;

	if (addr == NULL)
		return (0);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(__DECONST(void *, addr)))
		return (memguard_get_req_size(addr));
#endif

#ifdef DEBUG_REDZONE
	size = redzone_get_size(__DECONST(void *, addr));
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!malloc_large_slab(slab))
		size = zone->uz_size;
	else
		size = malloc_large_size(slab);
#endif

	/*
	 * Unmark the redzone to avoid reports from consumers who are
	 * (presumably) about to use the full allocation size.
	 */
	kasan_mark(addr, size, size, 0);

	return (size);
}

CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = mem_size / vm_kmem_size_scale;
		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}
	if (vm_kmem_size == 0)
		panic("Tune VM_KMEM_SIZE_* for the platform");

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);

	/*
	 * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
	 * shadowed.  Account for this when setting the UMA limit.
	 */
#if defined(KASAN)
	vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
	    (KASAN_SHADOW_SCALE + 1);
#elif defined(KMSAN)
	vm_kmem_size /= 3;
#endif

#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kernel_arena);
#endif
}
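
/*
 * A worked example of the sizing formula above, with hypothetical numbers:
 * a machine with 2^20 pages of physical memory (4GB with 4KB pages) and
 * VM_KMEM_SIZE_SCALE = 3 defaults to vm_kmem_size = (2^20 / 3) pages,
 * roughly 1.3GB, which is then clamped between VM_KMEM_SIZE_MIN and
 * VM_KMEM_SIZE_MAX and finally capped at twice the physical memory size.
 */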

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		const char *name = kmemzones[indx].kz_name;
		size_t align;
		int subzone;

		align = UMA_ALIGN_PTR;
		if (powerof2(size) && size > sizeof(void *))
			align = MIN(size, PAGE_SIZE) - 1;
		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    align, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
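
/*
 * The alignment computed in mallocinit() means, for example, that the
 * power-of-2 "malloc-4096" zone is created with align = PAGE_SIZE - 1 and
 * therefore returns page-aligned items (on 4KB-page systems), while
 * "malloc-384", not being a power of 2, keeps the default UMA_ALIGN_PTR
 * alignment.
 */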

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0,
	    ("malloc_init() called before vm_mem_init()"));

	mtp = data;
	if (mtp->ks_version != M_VERSION)
		panic("malloc_init: type %s with unsupported version %lu",
		    mtp->ks_shortdesc, mtp->ks_version);

	mtip = &mtp->ks_mti;
	mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_version == M_VERSION,
	    ("malloc_uninit: bad malloc type version"));

	mtx_lock(&malloc_mtx);
	mtip = &mtp->ks_mti;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
		}
		/*
		 * Fill in the missing CPUs.
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
    CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}
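
/*
 * A minimal malloc_type_list() usage sketch (illustrative; the callback and
 * counter are hypothetical).  The callback is invoked once per registered
 * malloc type, without malloc_mtx held:
 *
 *	static void
 *	count_type(struct malloc_type *mtp, void *arg)
 *	{
 *
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	malloc_type_list(count_type, &n);
 */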

#ifdef DDB
static int64_t
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
    uint64_t *inuse)
{
	const struct malloc_type_stats *mtsp;
	uint64_t frees, alloced, freed;
	int i;

	*allocs = 0;
	frees = 0;
	alloced = 0;
	freed = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);

		*allocs += mtsp->mts_numallocs;
		frees += mtsp->mts_numfrees;
		alloced += mtsp->mts_memalloced;
		freed += mtsp->mts_memfreed;
	}
	*inuse = *allocs - frees;
	return (alloced - freed);
}

DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
{
	const char *fmt_hdr, *fmt_entry;
	struct malloc_type *mtp;
	uint64_t allocs, inuse;
	int64_t size;
	/* variables for sorting */
	struct malloc_type *last_mtype, *cur_mtype;
	int64_t cur_size, last_size;
	int ties;

	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
	} else {
		fmt_hdr = "%18s %12s  %12s %12s\n";
		fmt_entry = "%18s %12ju %12jdK %12ju\n";
	}

	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");

	/* Select sort, largest size first. */
	last_mtype = NULL;
	last_size = INT64_MAX;
	for (;;) {
		cur_mtype = NULL;
		cur_size = -1;
		ties = 0;

		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
			/*
			 * In the case of size ties, print out mtypes
			 * in the order they are encountered.  That is,
			 * when we encounter the most recently output
			 * mtype, we have already printed all preceding
			 * ties, and we must print all following ties.
			 */
			if (mtp == last_mtype) {
				ties = 1;
				continue;
			}
			size = get_malloc_stats(&mtp->ks_mti, &allocs,
			    &inuse);
			if (size > cur_size && size < last_size + ties) {
				cur_size = size;
				cur_mtype = mtp;
			}
		}
		if (cur_mtype == NULL)
			break;

		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
		    howmany(size, 1024), allocs);

		if (db_pager_quit)
			break;

		last_mtype = cur_mtype;
		last_size = cur_size;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_version != M_VERSION) {
		db_printf("Version %lx does not match expected %x\n",
		    mtp->ks_version, M_VERSION);
		return;
	}

	mtip = &mtp->ks_mti;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */