/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  The back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
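
/*
 * A minimal usage sketch of this KPI (the type name and structure below are
 * hypothetical, not part of this file): a consumer declares a malloc type,
 * then allocates and frees against it so that per-type statistics are
 * attributed correctly.
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo driver buffers");
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOODEV);
 */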

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_malloc.c 328276 2018-01-23 04:37:31Z kp $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
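
/*
 * Worked example (sizes hypothetical): with REALLOC_FRACTION == 1, an
 * allocation backed by the 1024-byte bucket that is shrunk via realloc()
 * to 512 bytes or less is copied into a freshly allocated smaller block,
 * while a shrink to, say, 600 bytes simply reuses the original block.
 */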

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
	{8192, "8192", },
	{16384, "16384", },
	{32768, "32768", },
	{65536, "65536", },
	{0, NULL},
};
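
/*
 * For illustration: a 100-byte malloc(9) request is first rounded up to the
 * next KMEM_ZBASE (16-byte) boundary, 112, and the kmemsize[] lookup table
 * built in mallocinit() maps 112 >> KMEM_ZSHIFT to the index of the smallest
 * bucket that fits, i.e. the "128" zone above.  The 28-byte difference is
 * internal fragmentation charged to the caller's malloc type.
 */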

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size for which malloc(9) uses UMA as the backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
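
/*
 * Example (requires a kernel built with the MALLOC_MAKE_FAILURES option):
 * failing every 100th M_NOWAIT allocation can be requested at boot or at
 * runtime:
 *
 *	debug.malloc.failure_rate="100"		(loader.conf tunable)
 *	sysctl debug.malloc.failure_rate=100	(runtime, via sysctl(8))
 *
 * The number of injected failures is reported by
 * debug.malloc.failure_count.
 */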

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_ALLOC);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_FREE);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static u_int
mtp_get_subzone(const char *desc)
{
	size_t len;
	u_int val;

	if (desc == NULL || (len = strlen(desc)) == 0)
		return (0);
	val = desc[zone_offset % len];
	return (val % numzones);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static inline u_int
mtp_get_subzone(const char *desc)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
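
/*
 * Worked example of the subzone selection above (values hypothetical): with
 * MALLOC_DEBUG_MAXZONES=8, numzones=8 and zone_offset=700000, a type whose
 * short description is "devbuf" (length 6) selects character 700000 % 6 == 4,
 * 'u' (0x75), and lands in subzone 0x75 % 8 == 5.  Types assigned to
 * different subzones cannot corrupt each other's items through sub-page
 * buffer overruns.
 */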

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return NULL
 *	if the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 *	contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(kernel_arena, (vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}
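
/*
 * Illustrative use of the pair above (constraints hypothetical): a driver
 * needing a 64KB physically contiguous, 4KB-aligned buffer below 4GB might
 * do:
 *
 *	void *buf;
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_NOWAIT, 0, 0xffffffff,
 *	    4096, 0);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */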

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return NULL
 *	if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	struct malloc_type_internal *mtip;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= kmem_zmax) {
		mtip = mtp->ks_handle;
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		KASSERT(mtip->mti_zone < numzones,
		    ("mti_zone %u out of range %d",
		    mtip->mti_zone, numzones));
		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
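
/*
 * Illustrative use of mallocarray() (names hypothetical): allocating a
 * caller-sized array without an unchecked nitems * size multiplication:
 *
 *	struct foo_entry *entries;
 *
 *	entries = mallocarray(nitems, sizeof(*entries), M_TEMP,
 *	    M_WAITOK | M_ZERO);
 *
 * A product that would overflow a size_t panics here rather than silently
 * returning a short buffer.
 */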

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but frees memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
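
/*
 * reallocf() exists so that the common grow-a-buffer idiom cannot leak the
 * old block on failure (sketch, names hypothetical):
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *
 * With plain realloc(), assigning the result directly to 'buf' would lose
 * the only pointer to the original allocation when NULL is returned.
 */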

/*
 * Wake the uma reclamation pagedaemon thread when we exhaust KVA.  It
 * will call the lowmem handler and uma_reclaim() callbacks in a
 * context that is safe.
 */
static void
kmem_reclaim(vmem_t *vm, int flags)
{

	uma_reclaim_wakeup();
	pagedaemon_wakeup();
}

CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size, tmp;

	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
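	/*
	 * For example (numbers hypothetical): with 4GB of physical memory
	 * and VM_KMEM_SIZE_SCALE == 3, the initial preallocation would be
	 * roughly 1.33GB, then clamped to [VM_KMEM_SIZE_MIN,
	 * VM_KMEM_SIZE_MAX] where those are defined, and finally capped at
	 * twice physical memory by the code below.
	 */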
	mem_size = cnt.v_page_count;

	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
		vm_kmem_size = vm_kmem_size_min;

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/*
	 * Alternatively, the amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
#if defined(VM_KMEM_SIZE)
	vm_kmem_size = VM_KMEM_SIZE;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
	    0, 0);
	vmem_set_reclaim(kmem_arena, kmem_reclaim);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	uma_startup2();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (;i <= size; i+= KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");
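
/*
 * Userland normally consumes kern.malloc_stats through libmemstat(3); a
 * minimal raw reader (sketch, buffer setup and error handling omitted)
 * would fetch the blob with sysctlbyname(3) and walk the stream in the
 * same order it is produced above:
 *
 *	struct malloc_type_stream_header *mtshp = (void *)buf;
 *	char *p = buf + sizeof(*mtshp);
 *
 *	for (i = 0; i < mtshp->mtsh_count; i++) {
 *		struct malloc_type_header *mthp = (void *)p;
 *		p += sizeof(*mthp);
 *		p += mtshp->mtsh_maxcpus * sizeof(struct malloc_type_stats);
 *	}
 */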

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s  %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
		if (db_pager_quit)
			break;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */
