/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
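
/*
 * Illustrative usage (a sketch, not part of this file): a consumer first
 * declares a memory type and then allocates and frees against it.  The
 * type name M_FOOBUF below is hypothetical.
 *
 *	MALLOC_DEFINE(M_FOOBUF, "foobuf", "example buffers");
 *
 *	void *p = malloc(128, M_FOOBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_FOOBUF);
 *
 * Per-type statistics then appear under the "foobuf" name, e.g. in the
 * output of vmstat -m.
 */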

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_malloc.c 266204 2014-05-16 01:30:30Z ian $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
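
/*
 * Worked example with the default REALLOC_FRACTION of 1: shrinking a
 * 1024-byte block to 512 bytes or less allocates a fresh block, while a
 * new size of 513..1024 bytes returns the original address unchanged
 * (see the reuse test in realloc() below).
 */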

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	PAGE_SIZE
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];
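
/*
 * Example lookup (illustrative): a 100-byte request is first rounded up to
 * the next multiple of KMEM_ZBASE (112), and kmemsize[112 >> KMEM_ZSHIFT]
 * then selects the index of the 128-byte bucket in kmemzones[] below.
 */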

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) allocations are satisfied from a set of UMA buckets of
 * various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
#if PAGE_SIZE > 4096
	{8192, "8192", },
#if PAGE_SIZE > 8192
	{16384, "16384", },
#if PAGE_SIZE > 16384
	{32768, "32768", },
#if PAGE_SIZE > 32768
	{65536, "65536", },
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};
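
/*
 * Example of the rounding cost (illustrative): with power-of-two buckets,
 * a 600-byte request is served from the 1024-byte zone, so roughly 40% of
 * the allocation goes unused.
 */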

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_ALLOC);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = vmem_size(kmem_arena, VMEM_FREE);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static u_int
mtp_get_subzone(const char *desc)
{
	size_t len;
	u_int val;

	if (desc == NULL || (len = strlen(desc)) == 0)
		return (0);
	val = desc[zone_offset % len];
	return (val % numzones);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static inline u_int
mtp_get_subzone(const char *desc)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
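
/*
 * Illustrative arithmetic (assuming a zone_offset of 1000000): for the
 * short description "devbuf" (length 6), character 1000000 % 6 == 4 is
 * examined; that is 'u' (ASCII 117), so the type is placed in subzone
 * 117 % numzones.
 */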

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}
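
/*
 * Illustrative use (hypothetical caller): the return value is the number
 * of seconds since the last failure, so a subsystem can back off on
 * allocation retries, e.g.
 *
 *	if (malloc_last_fail() < 5)
 *		return (ENOMEM);	(failed less than 5 seconds ago)
 */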

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket being freed.  Occurs within a critical section so
 * that the thread isn't preempted and doesn't migrate while updating
 * per-CPU statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return NULL
 *	if the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}
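
/*
 * Illustrative call (hypothetical driver code): allocate a page-aligned
 * 8KB DMA buffer from physical memory below 4GB that does not cross a
 * 1MB boundary:
 *
 *	buf = contigmalloc(8192, M_DEVBUF, M_NOWAIT, 0, 0xffffffffUL,
 *	    PAGE_SIZE, 1024 * 1024);
 */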

/*
 *	contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(kernel_arena, (vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return NULL
 *	if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	struct malloc_type_internal *mtip;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		mtip = mtp->ks_handle;
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		KASSERT(mtip->mti_zone < numzones,
		    ("mti_zone %u out of range %d",
		    mtip->mti_zone, numzones));
		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
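
/*
 * Illustrative pattern (hypothetical caller): reallocf() avoids the leak
 * that occurs when a plain realloc() fails and the caller overwrites its
 * only pointer to the old block:
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);	(the old block has already been freed)
 */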

/*
 * Wake the page daemon when we exhaust KVA.  It will call the lowmem handler
 * and uma_reclaim() callbacks in a context that is safe.
 */
static void
kmem_reclaim(vmem_t *vm, int flags)
{

	pagedaemon_wakeup();
}

CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size, tmp;

	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
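	/*
	 * Worked example (with a hypothetical VM_KMEM_SIZE_SCALE of 3 and
	 * no floor or ceiling defined): a machine with 12GB of physical
	 * memory would preallocate 12GB / 3 = 4GB of KVA for the kmem arena.
	 */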
	mem_size = cnt.v_page_count;

	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
		vm_kmem_size = vm_kmem_size_min;

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/*
	 * Alternatively, the amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
#if defined(VM_KMEM_SIZE)
	vm_kmem_size = VM_KMEM_SIZE;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
	    0, 0);
	vmem_set_reclaim(kmem_arena, kmem_reclaim);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_init before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_uninit: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s  %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
		if (db_pager_quit)
			break;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */