jemalloc.c revision 234569
1#define	JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,
9    THREAD_ALLOCATED_INITIALIZER)
10
11const char	*__malloc_options_1_0;
12__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
13
14/* Runtime configuration options. */
15const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
16#ifdef JEMALLOC_DEBUG
17bool	opt_abort = true;
18#  ifdef JEMALLOC_FILL
19bool	opt_junk = true;
20#  else
21bool	opt_junk = false;
22#  endif
23#else
24bool	opt_abort = false;
25bool	opt_junk = false;
26#endif
27size_t	opt_quarantine = ZU(0);
28bool	opt_redzone = false;
29bool	opt_utrace = false;
30bool	opt_valgrind = false;
31bool	opt_xmalloc = false;
32bool	opt_zero = false;
33size_t	opt_narenas = 0;
34
35unsigned	ncpus;
36
37malloc_mutex_t		arenas_lock;
38arena_t			**arenas;
39unsigned		narenas;
40
41/* Set to true once the allocator has been initialized. */
42static bool		malloc_initialized = false;
43
44#ifdef JEMALLOC_THREADED_INIT
45/* Used to let the initializing thread recursively allocate. */
46#  define NO_INITIALIZER	((unsigned long)0)
47#  define INITIALIZER		pthread_self()
48#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
49static pthread_t		malloc_initializer = NO_INITIALIZER;
50#else
51#  define NO_INITIALIZER	false
52#  define INITIALIZER		true
53#  define IS_INITIALIZER	malloc_initializer
54static bool			malloc_initializer = NO_INITIALIZER;
55#endif
56
57/* Used to avoid initialization races. */
58static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
59
60typedef struct {
61	void	*p;	/* Input pointer (as in realloc(p, s)). */
62	size_t	s;	/* Request size. */
63	void	*r;	/* Result pointer. */
64} malloc_utrace_t;
65
66#ifdef JEMALLOC_UTRACE
67#  define UTRACE(a, b, c) do {						\
68	if (opt_utrace) {						\
69		malloc_utrace_t ut;					\
70		ut.p = (a);						\
71		ut.s = (b);						\
72		ut.r = (c);						\
73		utrace(&ut, sizeof(ut));				\
74	}								\
75} while (0)
76#else
77#  define UTRACE(a, b, c)
78#endif
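
/*
 * Each UTRACE() call hands a {p, s, r} triple to the kernel via utrace(2):
 * malloc() records (0, size, result), free() records (ptr, 0, 0), and
 * realloc() records (ptr, size, result).  As a sketch of how the records
 * are typically consumed on FreeBSD, they can be captured and decoded with
 * ktrace(1)/kdump(1):
 *
 *	ktrace -t u ./prog
 *	kdump -t u
 *
 * Nothing is emitted unless the "utrace" option is enabled at run time.
 */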
79
80/******************************************************************************/
81/* Function prototypes for non-inline static functions. */
82
83static void	stats_print_atexit(void);
84static unsigned	malloc_ncpus(void);
85static bool	malloc_conf_next(char const **opts_p, char const **k_p,
86    size_t *klen_p, char const **v_p, size_t *vlen_p);
87static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
88    const char *v, size_t vlen);
89static void	malloc_conf_init(void);
90static bool	malloc_init_hard(void);
91static int	imemalign(void **memptr, size_t alignment, size_t size,
92    size_t min_alignment);
93
94/******************************************************************************/
95/*
96 * Begin miscellaneous support functions.
97 */
98
99/* Create a new arena and insert it into the arenas array at index ind. */
100arena_t *
101arenas_extend(unsigned ind)
102{
103	arena_t *ret;
104
105	ret = (arena_t *)base_alloc(sizeof(arena_t));
106	if (ret != NULL && arena_new(ret, ind) == false) {
107		arenas[ind] = ret;
108		return (ret);
109	}
110	/* Only reached if there is an OOM error. */
111
112	/*
113	 * OOM here is quite inconvenient to propagate, since dealing with it
114	 * would require a check for failure in the fast path.  Instead, punt
115	 * by using arenas[0].  In practice, this is an extremely unlikely
116	 * failure.
117	 */
118	malloc_write("<jemalloc>: Error initializing arena\n");
119	if (opt_abort)
120		abort();
121
122	return (arenas[0]);
123}
124
125/* Slow path, called only by choose_arena(). */
126arena_t *
127choose_arena_hard(void)
128{
129	arena_t *ret;
130
131	if (narenas > 1) {
132		unsigned i, choose, first_null;
133
134		choose = 0;
135		first_null = narenas;
136		malloc_mutex_lock(&arenas_lock);
137		assert(arenas[0] != NULL);
138		for (i = 1; i < narenas; i++) {
139			if (arenas[i] != NULL) {
140				/*
141				 * Choose the lowest-indexed arena with the
142				 * fewest threads assigned to it.
143				 */
144				if (arenas[i]->nthreads <
145				    arenas[choose]->nthreads)
146					choose = i;
147			} else if (first_null == narenas) {
148				/*
149				 * Record the index of the first uninitialized
150				 * arena, in case all extant arenas are in use.
151				 *
152				 * NB: It is possible for there to be
153				 * discontinuities in terms of initialized
154				 * versus uninitialized arenas, due to the
155				 * "thread.arena" mallctl.
156				 */
157				first_null = i;
158			}
159		}
160
161		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
162			/*
163			 * Use an unloaded arena, or the least loaded arena if
164			 * all arenas are already initialized.
165			 */
166			ret = arenas[choose];
167		} else {
168			/* Initialize a new arena. */
169			ret = arenas_extend(first_null);
170		}
171		ret->nthreads++;
172		malloc_mutex_unlock(&arenas_lock);
173	} else {
174		ret = arenas[0];
175		malloc_mutex_lock(&arenas_lock);
176		ret->nthreads++;
177		malloc_mutex_unlock(&arenas_lock);
178	}
179
180	arenas_tsd_set(&ret);
181
182	return (ret);
183}
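
/*
 * A small worked example of the policy above, with hypothetical counts:
 * given narenas == 4, arenas[0..2] initialized with nthreads {3, 0, 2},
 * and arenas[3] == NULL, the loop leaves choose == 1 and first_null == 3;
 * since arenas[1] is idle it is reused rather than initializing arena 3.
 * With counts {3, 1, 2} instead, arena 3 would be created via
 * arenas_extend(3).
 */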
184
185static void
186stats_print_atexit(void)
187{
188
189	if (config_tcache && config_stats) {
190		unsigned i;
191
192		/*
193		 * Merge stats from extant threads.  This is racy, since
194		 * individual threads do not lock when recording tcache stats
195		 * events.  As a consequence, the final stats may be slightly
196		 * out of date by the time they are reported, if other threads
197		 * continue to allocate.
198		 */
199		for (i = 0; i < narenas; i++) {
200			arena_t *arena = arenas[i];
201			if (arena != NULL) {
202				tcache_t *tcache;
203
204				/*
205				 * tcache_stats_merge() locks bins, so if any
206				 * code is introduced that acquires both arena
207				 * and bin locks in the opposite order,
208				 * deadlocks may result.
209				 */
210				malloc_mutex_lock(&arena->lock);
211				ql_foreach(tcache, &arena->tcache_ql, link) {
212					tcache_stats_merge(tcache, arena);
213				}
214				malloc_mutex_unlock(&arena->lock);
215			}
216		}
217	}
218	je_malloc_stats_print(NULL, NULL, NULL);
219}
220
221/*
222 * End miscellaneous support functions.
223 */
224/******************************************************************************/
225/*
226 * Begin initialization functions.
227 */
228
229static unsigned
230malloc_ncpus(void)
231{
232	unsigned ret;
233	long result;
234
235	result = sysconf(_SC_NPROCESSORS_ONLN);
236	if (result == -1) {
237		/* Error; assume a single CPU. */
238		ret = 1;
239	} else
240		ret = (unsigned)result;
241
242	return (ret);
243}
244
245void
246arenas_cleanup(void *arg)
247{
248	arena_t *arena = *(arena_t **)arg;
249
250	malloc_mutex_lock(&arenas_lock);
251	arena->nthreads--;
252	malloc_mutex_unlock(&arenas_lock);
253}
254
255static inline bool
256malloc_init(void)
257{
258
259	if (malloc_initialized == false)
260		return (malloc_init_hard());
261
262	return (false);
263}
264
265static bool
266malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
267    char const **v_p, size_t *vlen_p)
268{
269	bool accept;
270	const char *opts = *opts_p;
271
272	*k_p = opts;
273
274	for (accept = false; accept == false;) {
275		switch (*opts) {
276		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
277		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
278		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
279		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
280		case 'Y': case 'Z':
281		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
282		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
283		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
284		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
285		case 'y': case 'z':
286		case '0': case '1': case '2': case '3': case '4': case '5':
287		case '6': case '7': case '8': case '9':
288		case '_':
289			opts++;
290			break;
291		case ':':
292			opts++;
293			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
294			*v_p = opts;
295			accept = true;
296			break;
297		case '\0':
298			if (opts != *opts_p) {
299				malloc_write("<jemalloc>: Conf string ends "
300				    "with key\n");
301			}
302			return (true);
303		default:
304			malloc_write("<jemalloc>: Malformed conf string\n");
305			return (true);
306		}
307	}
308
309	for (accept = false; accept == false;) {
310		switch (*opts) {
311		case ',':
312			opts++;
313			/*
314			 * Look ahead one character here: the comma has already
315			 * been consumed, so if no input remained, the next call
316			 * would treat this as a clean end of input rather than
317			 * reporting a conf string that ends with a trailing
318			 * comma.
319			 */
320			if (*opts == '\0') {
321				malloc_write("<jemalloc>: Conf string ends "
322				    "with comma\n");
323			}
324			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
325			accept = true;
326			break;
327		case '\0':
328			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
329			accept = true;
330			break;
331		default:
332			opts++;
333			break;
334		}
335	}
336
337	*opts_p = opts;
338	return (false);
339}
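
/*
 * Tokenization sketch: parsing the string "abort:true,narenas:8" yields
 * (k, v) == ("abort", "true") on the first call and ("narenas", "8") on
 * the second, after which *opts_p points at the terminating '\0' and the
 * caller's loop ends.  A key with no ':' value at the end of the string,
 * or a trailing ',', is reported via malloc_write() but does not abort
 * the program.
 */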
340
341static void
342malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
343    size_t vlen)
344{
345
346	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
347	    (int)vlen, v);
348}
349
350static void
351malloc_conf_init(void)
352{
353	unsigned i;
354	char buf[PATH_MAX + 1];
355	const char *opts, *k, *v;
356	size_t klen, vlen;
357
358	for (i = 0; i < 3; i++) {
359		/* Get runtime configuration. */
360		switch (i) {
361		case 0:
362			if (je_malloc_conf != NULL) {
363				/*
364				 * Use options that were compiled into the
365				 * program.
366				 */
367				opts = je_malloc_conf;
368			} else {
369				/* No configuration specified. */
370				buf[0] = '\0';
371				opts = buf;
372			}
373			break;
374		case 1: {
375			int linklen;
376			const char *linkname =
377#ifdef JEMALLOC_PREFIX
378			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
379#else
380			    "/etc/malloc.conf"
381#endif
382			    ;
383
384			if ((linklen = readlink(linkname, buf,
385			    sizeof(buf) - 1)) != -1) {
386				/*
387				 * Use the contents of the "/etc/malloc.conf"
388				 * symbolic link's name.
389				 */
390				buf[linklen] = '\0';
391				opts = buf;
392			} else {
393				/* No configuration specified. */
394				buf[0] = '\0';
395				opts = buf;
396			}
397			break;
398		} case 2: {
399			const char *envname =
400#ifdef JEMALLOC_PREFIX
401			    JEMALLOC_CPREFIX"MALLOC_CONF"
402#else
403			    "MALLOC_CONF"
404#endif
405			    ;
406
407			if (issetugid() == 0 && (opts = getenv(envname)) !=
408			    NULL) {
409				/*
410				 * Do nothing; opts is already initialized to
411				 * the value of the MALLOC_CONF environment
412				 * variable.
413				 */
414			} else {
415				/* No configuration specified. */
416				buf[0] = '\0';
417				opts = buf;
418			}
419			break;
420		} default:
421			/* NOTREACHED */
422			assert(false);
423			buf[0] = '\0';
424			opts = buf;
425		}
426
427		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
428		    &vlen) == false) {
429#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
430			if (sizeof(n)-1 == klen && strncmp(n, k,	\
431			    klen) == 0) {				\
432				if (strncmp("true", v, vlen) == 0 &&	\
433				    vlen == sizeof("true")-1)		\
434					o = true;			\
435				else if (strncmp("false", v, vlen) ==	\
436				    0 && vlen == sizeof("false")-1)	\
437					o = false;			\
438				else {					\
439					malloc_conf_error(		\
440					    "Invalid conf value",	\
441					    k, klen, v, vlen);		\
442				}					\
443				hit = true;				\
444			} else						\
445				hit = false;
446#define	CONF_HANDLE_BOOL(o, n) {					\
447			bool hit;					\
448			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
449			if (hit)					\
450				continue;				\
451}
452#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
453			if (sizeof(n)-1 == klen && strncmp(n, k,	\
454			    klen) == 0) {				\
455				uintmax_t um;				\
456				char *end;				\
457									\
458				errno = 0;				\
459				um = malloc_strtoumax(v, &end, 0);	\
460				if (errno != 0 || (uintptr_t)end -	\
461				    (uintptr_t)v != vlen) {		\
462					malloc_conf_error(		\
463					    "Invalid conf value",	\
464					    k, klen, v, vlen);		\
465				} else if (um < min || um > max) {	\
466					malloc_conf_error(		\
467					    "Out-of-range conf value",	\
468					    k, klen, v, vlen);		\
469				} else					\
470					o = um;				\
471				continue;				\
472			}
473#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
474			if (sizeof(n)-1 == klen && strncmp(n, k,	\
475			    klen) == 0) {				\
476				long l;					\
477				char *end;				\
478									\
479				errno = 0;				\
480				l = strtol(v, &end, 0);			\
481				if (errno != 0 || (uintptr_t)end -	\
482				    (uintptr_t)v != vlen) {		\
483					malloc_conf_error(		\
484					    "Invalid conf value",	\
485					    k, klen, v, vlen);		\
486				} else if (l < (ssize_t)min || l >	\
487				    (ssize_t)max) {			\
488					malloc_conf_error(		\
489					    "Out-of-range conf value",	\
490					    k, klen, v, vlen);		\
491				} else					\
492					o = l;				\
493				continue;				\
494			}
495#define	CONF_HANDLE_CHAR_P(o, n, d)					\
496			if (sizeof(n)-1 == klen && strncmp(n, k,	\
497			    klen) == 0) {				\
498				size_t cpylen = (vlen <=		\
499				    sizeof(o)-1) ? vlen :		\
500				    sizeof(o)-1;			\
501				strncpy(o, v, cpylen);			\
502				o[cpylen] = '\0';			\
503				continue;				\
504			}
505
506			CONF_HANDLE_BOOL(opt_abort, "abort")
507			/*
508			 * Chunks always require at least one header page, plus
509			 * one data page in the absence of redzones, or three
510			 * pages in the presence of redzones.  In order to
511			 * simplify options processing, fix the limit based on
512			 * config_fill.
513			 */
514			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
515			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
516			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
517			    SIZE_T_MAX)
518			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
519			    -1, (sizeof(size_t) << 3) - 1)
520			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
521			if (config_fill) {
522				CONF_HANDLE_BOOL(opt_junk, "junk")
523				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
524				    0, SIZE_T_MAX)
525				CONF_HANDLE_BOOL(opt_redzone, "redzone")
526				CONF_HANDLE_BOOL(opt_zero, "zero")
527			}
528			if (config_utrace) {
529				CONF_HANDLE_BOOL(opt_utrace, "utrace")
530			}
531			if (config_valgrind) {
532				bool hit;
533				CONF_HANDLE_BOOL_HIT(opt_valgrind,
534				    "valgrind", hit)
535				if (config_fill && opt_valgrind && hit) {
536					opt_junk = false;
537					opt_zero = false;
538					if (opt_quarantine == 0) {
539						opt_quarantine =
540						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
541					}
542					opt_redzone = true;
543				}
544				if (hit)
545					continue;
546			}
547			if (config_xmalloc) {
548				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
549			}
550			if (config_tcache) {
551				CONF_HANDLE_BOOL(opt_tcache, "tcache")
552				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
553				    "lg_tcache_max", -1,
554				    (sizeof(size_t) << 3) - 1)
555			}
556			if (config_prof) {
557				CONF_HANDLE_BOOL(opt_prof, "prof")
558				CONF_HANDLE_CHAR_P(opt_prof_prefix,
559				    "prof_prefix", "jeprof")
560				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
561				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
562				    "lg_prof_sample", 0,
563				    (sizeof(uint64_t) << 3) - 1)
564				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
565				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
566				    "lg_prof_interval", -1,
567				    (sizeof(uint64_t) << 3) - 1)
568				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
569				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
570				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
571			}
572			malloc_conf_error("Invalid conf pair", k, klen, v,
573			    vlen);
574#undef CONF_HANDLE_BOOL
575#undef CONF_HANDLE_SIZE_T
576#undef CONF_HANDLE_SSIZE_T
577#undef CONF_HANDLE_CHAR_P
578		}
579	}
580}
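
/*
 * As an illustration of the three sources probed above (each pass
 * overrides matching options from earlier passes), the same options can be
 * compiled into the program via je_malloc_conf, encoded in the name of a
 * symbolic link, or supplied through the environment, e.g.:
 *
 *	ln -s 'junk:true,narenas:2' /etc/malloc.conf
 *	MALLOC_CONF='junk:true,narenas:2' ./prog
 *
 * With JEMALLOC_PREFIX defined, the link and variable names carry the
 * configured prefix instead.
 */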
581
582static bool
583malloc_init_hard(void)
584{
585	arena_t *init_arenas[1];
586
587	malloc_mutex_lock(&init_lock);
588	if (malloc_initialized || IS_INITIALIZER) {
589		/*
590		 * Another thread initialized the allocator before this one
591		 * acquired init_lock, or this thread is the initializing
592		 * thread, and it is recursively allocating.
593		 */
594		malloc_mutex_unlock(&init_lock);
595		return (false);
596	}
597#ifdef JEMALLOC_THREADED_INIT
598	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
599		/* Busy-wait until the initializing thread completes. */
600		do {
601			malloc_mutex_unlock(&init_lock);
602			CPU_SPINWAIT;
603			malloc_mutex_lock(&init_lock);
604		} while (malloc_initialized == false);
605		malloc_mutex_unlock(&init_lock);
606		return (false);
607	}
608#endif
609	malloc_initializer = INITIALIZER;
610
611	malloc_tsd_boot();
612	if (config_prof)
613		prof_boot0();
614
615	malloc_conf_init();
616
617#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
618	/* Register fork handlers. */
619	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
620	    jemalloc_postfork_child) != 0) {
621		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
622		if (opt_abort)
623			abort();
624	}
625#endif
626
627	if (opt_stats_print) {
628		/* Print statistics at exit. */
629		if (atexit(stats_print_atexit) != 0) {
630			malloc_write("<jemalloc>: Error in atexit()\n");
631			if (opt_abort)
632				abort();
633		}
634	}
635
636	if (base_boot()) {
637		malloc_mutex_unlock(&init_lock);
638		return (true);
639	}
640
641	if (chunk_boot()) {
642		malloc_mutex_unlock(&init_lock);
643		return (true);
644	}
645
646	if (ctl_boot()) {
647		malloc_mutex_unlock(&init_lock);
648		return (true);
649	}
650
651	if (config_prof)
652		prof_boot1();
653
654	arena_boot();
655
656	if (config_tcache && tcache_boot0()) {
657		malloc_mutex_unlock(&init_lock);
658		return (true);
659	}
660
661	if (huge_boot()) {
662		malloc_mutex_unlock(&init_lock);
663		return (true);
664	}
665
666	if (malloc_mutex_init(&arenas_lock))
667		return (true);
668
669	/*
670	 * Create enough scaffolding to allow recursive allocation in
671	 * malloc_ncpus().
672	 */
673	narenas = 1;
674	arenas = init_arenas;
675	memset(arenas, 0, sizeof(arena_t *) * narenas);
676
677	/*
678	 * Initialize one arena here.  The rest are lazily created in
679	 * choose_arena_hard().
680	 */
681	arenas_extend(0);
682	if (arenas[0] == NULL) {
683		malloc_mutex_unlock(&init_lock);
684		return (true);
685	}
686
687	/* Initialize allocation counters before any allocations can occur. */
688	if (config_stats && thread_allocated_tsd_boot()) {
689		malloc_mutex_unlock(&init_lock);
690		return (true);
691	}
692
693	if (arenas_tsd_boot()) {
694		malloc_mutex_unlock(&init_lock);
695		return (true);
696	}
697
698	if (config_tcache && tcache_boot1()) {
699		malloc_mutex_unlock(&init_lock);
700		return (true);
701	}
702
703	if (config_fill && quarantine_boot()) {
704		malloc_mutex_unlock(&init_lock);
705		return (true);
706	}
707
708	if (config_prof && prof_boot2()) {
709		malloc_mutex_unlock(&init_lock);
710		return (true);
711	}
712
713	/* Get number of CPUs. */
714	malloc_mutex_unlock(&init_lock);
715	ncpus = malloc_ncpus();
716	malloc_mutex_lock(&init_lock);
717
718	if (mutex_boot()) {
719		malloc_mutex_unlock(&init_lock);
720		return (true);
721	}
722
723	if (opt_narenas == 0) {
724		/*
725		 * For SMP systems, create more than one arena per CPU by
726		 * default.
727		 */
728		if (ncpus > 1)
729			opt_narenas = ncpus << 2;
730		else
731			opt_narenas = 1;
732	}
733	narenas = opt_narenas;
734	/*
735	 * Make sure that the arenas array can be allocated.  In practice, this
736	 * limit is enough to allow the allocator to function, but the ctl
737	 * machinery will fail to allocate memory at far lower limits.
738	 */
739	if (narenas > chunksize / sizeof(arena_t *)) {
740		narenas = chunksize / sizeof(arena_t *);
741		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
742		    narenas);
743	}
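
	/*
	 * Numerically, as a sketch assuming the defaults: with 8 CPUs,
	 * opt_narenas defaults to 8 << 2 == 32 above, while with 4 MiB chunks
	 * and 8-byte pointers the clamp only takes effect beyond
	 * 4194304 / 8 == 524288 arenas.
	 */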
744
745	/* Allocate and initialize arenas. */
746	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
747	if (arenas == NULL) {
748		malloc_mutex_unlock(&init_lock);
749		return (true);
750	}
751	/*
752	 * Zero the array.  In practice, this should always be pre-zeroed,
753	 * since it was just mmap()ed, but let's be sure.
754	 */
755	memset(arenas, 0, sizeof(arena_t *) * narenas);
756	/* Copy the pointer to the one arena that was already initialized. */
757	arenas[0] = init_arenas[0];
758
759	malloc_initialized = true;
760	malloc_mutex_unlock(&init_lock);
761	return (false);
762}
763
764/*
765 * End initialization functions.
766 */
767/******************************************************************************/
768/*
769 * Begin malloc(3)-compatible functions.
770 */
771
772JEMALLOC_ATTR(malloc)
773JEMALLOC_ATTR(visibility("default"))
774void *
775je_malloc(size_t size)
776{
777	void *ret;
778	size_t usize;
779	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
780
781	if (malloc_init()) {
782		ret = NULL;
783		goto label_oom;
784	}
785
786	if (size == 0)
787		size = 1;
788
789	if (config_prof && opt_prof) {
790		usize = s2u(size);
791		PROF_ALLOC_PREP(1, usize, cnt);
792		if (cnt == NULL) {
793			ret = NULL;
794			goto label_oom;
795		}
796		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
797		    SMALL_MAXCLASS) {
798			ret = imalloc(SMALL_MAXCLASS+1);
799			if (ret != NULL)
800				arena_prof_promoted(ret, usize);
801		} else
802			ret = imalloc(size);
803	} else {
804		if (config_stats || (config_valgrind && opt_valgrind))
805			usize = s2u(size);
806		ret = imalloc(size);
807	}
808
809label_oom:
810	if (ret == NULL) {
811		if (config_xmalloc && opt_xmalloc) {
812			malloc_write("<jemalloc>: Error in malloc(): "
813			    "out of memory\n");
814			abort();
815		}
816		errno = ENOMEM;
817	}
818	if (config_prof && opt_prof && ret != NULL)
819		prof_malloc(ret, usize, cnt);
820	if (config_stats && ret != NULL) {
821		assert(usize == isalloc(ret, config_prof));
822		thread_allocated_tsd_get()->allocated += usize;
823	}
824	UTRACE(0, size, ret);
825	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
826	return (ret);
827}
828
829JEMALLOC_ATTR(nonnull(1))
830#ifdef JEMALLOC_PROF
831/*
832 * Avoid any uncertainty as to how many backtrace frames to ignore in
833 * PROF_ALLOC_PREP().
834 */
835JEMALLOC_ATTR(noinline)
836#endif
837static int
838imemalign(void **memptr, size_t alignment, size_t size,
839    size_t min_alignment)
840{
841	int ret;
842	size_t usize;
843	void *result;
844	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
845
846	assert(min_alignment != 0);
847
848	if (malloc_init())
849		result = NULL;
850	else {
851		if (size == 0)
852			size = 1;
853
854		/* Make sure that alignment is a large enough power of 2. */
855		if (((alignment - 1) & alignment) != 0
856		    || (alignment < min_alignment)) {
857			if (config_xmalloc && opt_xmalloc) {
858				malloc_write("<jemalloc>: Error allocating "
859				    "aligned memory: invalid alignment\n");
860				abort();
861			}
862			result = NULL;
863			ret = EINVAL;
864			goto label_return;
865		}
866
867		usize = sa2u(size, alignment);
868		if (usize == 0) {
869			result = NULL;
870			ret = ENOMEM;
871			goto label_return;
872		}
873
874		if (config_prof && opt_prof) {
875			PROF_ALLOC_PREP(2, usize, cnt);
876			if (cnt == NULL) {
877				result = NULL;
878				ret = EINVAL;
879			} else {
880				if (prof_promote && (uintptr_t)cnt !=
881				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
882					assert(sa2u(SMALL_MAXCLASS+1,
883					    alignment) != 0);
884					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
885					    alignment), alignment, false);
886					if (result != NULL) {
887						arena_prof_promoted(result,
888						    usize);
889					}
890				} else {
891					result = ipalloc(usize, alignment,
892					    false);
893				}
894			}
895		} else
896			result = ipalloc(usize, alignment, false);
897	}
898
899	if (result == NULL) {
900		if (config_xmalloc && opt_xmalloc) {
901			malloc_write("<jemalloc>: Error allocating aligned "
902			    "memory: out of memory\n");
903			abort();
904		}
905		ret = ENOMEM;
906		goto label_return;
907	}
908
909	*memptr = result;
910	ret = 0;
911
912label_return:
913	if (config_stats && result != NULL) {
914		assert(usize == isalloc(result, config_prof));
915		thread_allocated_tsd_get()->allocated += usize;
916	}
917	if (config_prof && opt_prof && result != NULL)
918		prof_malloc(result, usize, cnt);
919	UTRACE(0, size, result);
920	return (ret);
921}
922
923JEMALLOC_ATTR(nonnull(1))
924JEMALLOC_ATTR(visibility("default"))
925int
926je_posix_memalign(void **memptr, size_t alignment, size_t size)
927{
928	int ret = imemalign(memptr, alignment, size, sizeof(void *));
929	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
930	    config_prof), false);
931	return (ret);
932}
933
934JEMALLOC_ATTR(malloc)
935JEMALLOC_ATTR(visibility("default"))
936void *
937je_aligned_alloc(size_t alignment, size_t size)
938{
939	void *ret;
940	int err;
941
942	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
943		ret = NULL;
944		errno = err;
945	}
946	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
947	    false);
948	return (ret);
949}
950
951JEMALLOC_ATTR(malloc)
952JEMALLOC_ATTR(visibility("default"))
953void *
954je_calloc(size_t num, size_t size)
955{
956	void *ret;
957	size_t num_size;
958	size_t usize;
959	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
960
961	if (malloc_init()) {
962		num_size = 0;
963		ret = NULL;
964		goto label_return;
965	}
966
967	num_size = num * size;
968	if (num_size == 0) {
969		if (num == 0 || size == 0)
970			num_size = 1;
971		else {
972			ret = NULL;
973			goto label_return;
974		}
975	/*
976	 * Try to avoid division here.  We know that it isn't possible to
977	 * overflow during multiplication if neither operand uses any of the
978	 * most significant half of the bits in a size_t.
979	 */
980	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
981	    && (num_size / size != num)) {
982		/* size_t overflow. */
983		ret = NULL;
984		goto label_return;
985	}
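
	/*
	 * Concretely (an LP64 sketch of the check above): sizeof(size_t) << 2
	 * == 32, so the mask covers the upper 32 bits of a size_t.  If
	 * neither num nor size has any of those bits set, both are < 2^32,
	 * num * size < 2^64 cannot overflow, and the division is skipped.
	 */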
986
987	if (config_prof && opt_prof) {
988		usize = s2u(num_size);
989		PROF_ALLOC_PREP(1, usize, cnt);
990		if (cnt == NULL) {
991			ret = NULL;
992			goto label_return;
993		}
994		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
995		    <= SMALL_MAXCLASS) {
996			ret = icalloc(SMALL_MAXCLASS+1);
997			if (ret != NULL)
998				arena_prof_promoted(ret, usize);
999		} else
1000			ret = icalloc(num_size);
1001	} else {
1002		if (config_stats || (config_valgrind && opt_valgrind))
1003			usize = s2u(num_size);
1004		ret = icalloc(num_size);
1005	}
1006
1007label_return:
1008	if (ret == NULL) {
1009		if (config_xmalloc && opt_xmalloc) {
1010			malloc_write("<jemalloc>: Error in calloc(): out of "
1011			    "memory\n");
1012			abort();
1013		}
1014		errno = ENOMEM;
1015	}
1016
1017	if (config_prof && opt_prof && ret != NULL)
1018		prof_malloc(ret, usize, cnt);
1019	if (config_stats && ret != NULL) {
1020		assert(usize == isalloc(ret, config_prof));
1021		thread_allocated_tsd_get()->allocated += usize;
1022	}
1023	UTRACE(0, num_size, ret);
1024	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1025	return (ret);
1026}
1027
1028JEMALLOC_ATTR(visibility("default"))
1029void *
1030je_realloc(void *ptr, size_t size)
1031{
1032	void *ret;
1033	size_t usize;
1034	size_t old_size = 0;
1035	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1036	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1037	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
1038
1039	if (size == 0) {
1040		if (ptr != NULL) {
1041			/* realloc(ptr, 0) is equivalent to free(ptr). */
1042			if (config_prof) {
1043				old_size = isalloc(ptr, true);
1044				if (config_valgrind && opt_valgrind)
1045					old_rzsize = p2rz(ptr);
1046			} else if (config_stats) {
1047				old_size = isalloc(ptr, false);
1048				if (config_valgrind && opt_valgrind)
1049					old_rzsize = u2rz(old_size);
1050			} else if (config_valgrind && opt_valgrind) {
1051				old_size = isalloc(ptr, false);
1052				old_rzsize = u2rz(old_size);
1053			}
1054			if (config_prof && opt_prof) {
1055				old_ctx = prof_ctx_get(ptr);
1056				cnt = NULL;
1057			}
1058			iqalloc(ptr);
1059			ret = NULL;
1060			goto label_return;
1061		} else
1062			size = 1;
1063	}
1064
1065	if (ptr != NULL) {
1066		assert(malloc_initialized || IS_INITIALIZER);
1067
1068		if (config_prof) {
1069			old_size = isalloc(ptr, true);
1070			if (config_valgrind && opt_valgrind)
1071				old_rzsize = p2rz(ptr);
1072		} else if (config_stats) {
1073			old_size = isalloc(ptr, false);
1074			if (config_valgrind && opt_valgrind)
1075				old_rzsize = u2rz(old_size);
1076		} else if (config_valgrind && opt_valgrind) {
1077			old_size = isalloc(ptr, false);
1078			old_rzsize = u2rz(old_size);
1079		}
1080		if (config_prof && opt_prof) {
1081			usize = s2u(size);
1082			old_ctx = prof_ctx_get(ptr);
1083			PROF_ALLOC_PREP(1, usize, cnt);
1084			if (cnt == NULL) {
1085				old_ctx = NULL;
1086				ret = NULL;
1087				goto label_oom;
1088			}
1089			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
1090			    usize <= SMALL_MAXCLASS) {
1091				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
1092				    false, false);
1093				if (ret != NULL)
1094					arena_prof_promoted(ret, usize);
1095				else
1096					old_ctx = NULL;
1097			} else {
1098				ret = iralloc(ptr, size, 0, 0, false, false);
1099				if (ret == NULL)
1100					old_ctx = NULL;
1101			}
1102		} else {
1103			if (config_stats || (config_valgrind && opt_valgrind))
1104				usize = s2u(size);
1105			ret = iralloc(ptr, size, 0, 0, false, false);
1106		}
1107
1108label_oom:
1109		if (ret == NULL) {
1110			if (config_xmalloc && opt_xmalloc) {
1111				malloc_write("<jemalloc>: Error in realloc(): "
1112				    "out of memory\n");
1113				abort();
1114			}
1115			errno = ENOMEM;
1116		}
1117	} else {
1118		/* realloc(NULL, size) is equivalent to malloc(size). */
1119		if (config_prof && opt_prof)
1120			old_ctx = NULL;
1121		if (malloc_init()) {
1122			if (config_prof && opt_prof)
1123				cnt = NULL;
1124			ret = NULL;
1125		} else {
1126			if (config_prof && opt_prof) {
1127				usize = s2u(size);
1128				PROF_ALLOC_PREP(1, usize, cnt);
1129				if (cnt == NULL)
1130					ret = NULL;
1131				else {
1132					if (prof_promote && (uintptr_t)cnt !=
1133					    (uintptr_t)1U && usize <=
1134					    SMALL_MAXCLASS) {
1135						ret = imalloc(SMALL_MAXCLASS+1);
1136						if (ret != NULL) {
1137							arena_prof_promoted(ret,
1138							    usize);
1139						}
1140					} else
1141						ret = imalloc(size);
1142				}
1143			} else {
1144				if (config_stats || (config_valgrind &&
1145				    opt_valgrind))
1146					usize = s2u(size);
1147				ret = imalloc(size);
1148			}
1149		}
1150
1151		if (ret == NULL) {
1152			if (config_xmalloc && opt_xmalloc) {
1153				malloc_write("<jemalloc>: Error in realloc(): "
1154				    "out of memory\n");
1155				abort();
1156			}
1157			errno = ENOMEM;
1158		}
1159	}
1160
1161label_return:
1162	if (config_prof && opt_prof)
1163		prof_realloc(ret, usize, cnt, old_size, old_ctx);
1164	if (config_stats && ret != NULL) {
1165		thread_allocated_t *ta;
1166		assert(usize == isalloc(ret, config_prof));
1167		ta = thread_allocated_tsd_get();
1168		ta->allocated += usize;
1169		ta->deallocated += old_size;
1170	}
1171	UTRACE(ptr, size, ret);
1172	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
1173	return (ret);
1174}
1175
1176JEMALLOC_ATTR(visibility("default"))
1177void
1178je_free(void *ptr)
1179{
1180
1181	UTRACE(ptr, 0, 0);
1182	if (ptr != NULL) {
1183		size_t usize;
1184		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1185
1186		assert(malloc_initialized || IS_INITIALIZER);
1187
1188		if (config_prof && opt_prof) {
1189			usize = isalloc(ptr, config_prof);
1190			prof_free(ptr, usize);
1191		} else if (config_stats || config_valgrind)
1192			usize = isalloc(ptr, config_prof);
1193		if (config_stats)
1194			thread_allocated_tsd_get()->deallocated += usize;
1195		if (config_valgrind && opt_valgrind)
1196			rzsize = p2rz(ptr);
1197		iqalloc(ptr);
1198		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1199	}
1200}
1201
1202/*
1203 * End malloc(3)-compatible functions.
1204 */
1205/******************************************************************************/
1206/*
1207 * Begin non-standard override functions.
1208 */
1209
1210#ifdef JEMALLOC_OVERRIDE_MEMALIGN
1211JEMALLOC_ATTR(malloc)
1212JEMALLOC_ATTR(visibility("default"))
1213void *
1214je_memalign(size_t alignment, size_t size)
1215{
1216	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1217	imemalign(&ret, alignment, size, 1);
1218	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1219	return (ret);
1220}
1221#endif
1222
1223#ifdef JEMALLOC_OVERRIDE_VALLOC
1224JEMALLOC_ATTR(malloc)
1225JEMALLOC_ATTR(visibility("default"))
1226void *
1227je_valloc(size_t size)
1228{
1229	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1230	imemalign(&ret, PAGE, size, 1);
1231	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1232	return (ret);
1233}
1234#endif
1235
1236/*
1237 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1238 * #define je_malloc malloc
1239 */
1240#define	malloc_is_malloc 1
1241#define	is_malloc_(a) malloc_is_ ## a
1242#define	is_malloc(a) is_malloc_(a)
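
/*
 * The extra level of indirection lets the argument expand first: when
 * jemalloc_defs.h maps je_malloc to malloc, is_malloc(je_malloc) becomes
 * is_malloc_(malloc) --> malloc_is_malloc --> 1; otherwise the result is
 * some other, undefined identifier, which evaluates to 0 in the #if below.
 */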
1243
1244#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
1245/*
1246 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1247 * to inconsistently reference libc's malloc(3)-compatible functions
1248 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1249 *
1250 * These definitions interpose hooks in glibc.  The functions are actually
1251 * passed an extra argument for the caller return address, which will be
1252 * ignored.
1253 */
1254JEMALLOC_ATTR(visibility("default"))
1255void (* const __free_hook)(void *ptr) = je_free;
1256
1257JEMALLOC_ATTR(visibility("default"))
1258void *(* const __malloc_hook)(size_t size) = je_malloc;
1259
1260JEMALLOC_ATTR(visibility("default"))
1261void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
1262
1263JEMALLOC_ATTR(visibility("default"))
1264void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
1265#endif
1266
1267/*
1268 * End non-standard override functions.
1269 */
1270/******************************************************************************/
1271/*
1272 * Begin non-standard functions.
1273 */
1274
1275JEMALLOC_ATTR(visibility("default"))
1276size_t
1277je_malloc_usable_size(const void *ptr)
1278{
1279	size_t ret;
1280
1281	assert(malloc_initialized || IS_INITIALIZER);
1282
1283	if (config_ivsalloc)
1284		ret = ivsalloc(ptr, config_prof);
1285	else
1286		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
1287
1288	return (ret);
1289}
1290
1291JEMALLOC_ATTR(visibility("default"))
1292void
1293je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1294    const char *opts)
1295{
1296
1297	stats_print(write_cb, cbopaque, opts);
1298}
1299
1300JEMALLOC_ATTR(visibility("default"))
1301int
1302je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
1303    size_t newlen)
1304{
1305
1306	if (malloc_init())
1307		return (EAGAIN);
1308
1309	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1310}
1311
1312JEMALLOC_ATTR(visibility("default"))
1313int
1314je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
1315{
1316
1317	if (malloc_init())
1318		return (EAGAIN);
1319
1320	return (ctl_nametomib(name, mibp, miblenp));
1321}
1322
1323JEMALLOC_ATTR(visibility("default"))
1324int
1325je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1326  void *newp, size_t newlen)
1327{
1328
1329	if (malloc_init())
1330		return (EAGAIN);
1331
1332	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1333}
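
/*
 * Caller-side sketch of the control interface (assuming the standard
 * "epoch" and "stats.allocated" mallctl names):
 *
 *	uint64_t epoch = 1;
 *	size_t allocated, sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *	sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 *
 * mallctlnametomib()/mallctlbymib() exist so that repeated lookups of the
 * same name can be amortized across many queries.
 */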
1334
1335/*
1336 * End non-standard functions.
1337 */
1338/******************************************************************************/
1339/*
1340 * Begin experimental functions.
1341 */
1342#ifdef JEMALLOC_EXPERIMENTAL
1343
1344JEMALLOC_INLINE void *
1345iallocm(size_t usize, size_t alignment, bool zero)
1346{
1347
1348	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1349	    alignment)));
1350
1351	if (alignment != 0)
1352		return (ipalloc(usize, alignment, zero));
1353	else if (zero)
1354		return (icalloc(usize));
1355	else
1356		return (imalloc(usize));
1357}
1358
1359JEMALLOC_ATTR(nonnull(1))
1360JEMALLOC_ATTR(visibility("default"))
1361int
1362je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
1363{
1364	void *p;
1365	size_t usize;
1366	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1367	    & (SIZE_T_MAX-1));
1368	bool zero = flags & ALLOCM_ZERO;
1369	prof_thr_cnt_t *cnt;
1370
1371	assert(ptr != NULL);
1372	assert(size != 0);
1373
1374	if (malloc_init())
1375		goto label_oom;
1376
1377	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1378	if (usize == 0)
1379		goto label_oom;
1380
1381	if (config_prof && opt_prof) {
1382		PROF_ALLOC_PREP(1, usize, cnt);
1383		if (cnt == NULL)
1384			goto label_oom;
1385		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1386		    SMALL_MAXCLASS) {
1387			size_t usize_promoted = (alignment == 0) ?
1388			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1389			    alignment);
1390			assert(usize_promoted != 0);
1391			p = iallocm(usize_promoted, alignment, zero);
1392			if (p == NULL)
1393				goto label_oom;
1394			arena_prof_promoted(p, usize);
1395		} else {
1396			p = iallocm(usize, alignment, zero);
1397			if (p == NULL)
1398				goto label_oom;
1399		}
1400		prof_malloc(p, usize, cnt);
1401	} else {
1402		p = iallocm(usize, alignment, zero);
1403		if (p == NULL)
1404			goto label_oom;
1405	}
1406	if (rsize != NULL)
1407		*rsize = usize;
1408
1409	*ptr = p;
1410	if (config_stats) {
1411		assert(usize == isalloc(p, config_prof));
1412		thread_allocated_tsd_get()->allocated += usize;
1413	}
1414	UTRACE(0, size, p);
1415	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
1416	return (ALLOCM_SUCCESS);
1417label_oom:
1418	if (config_xmalloc && opt_xmalloc) {
1419		malloc_write("<jemalloc>: Error in allocm(): "
1420		    "out of memory\n");
1421		abort();
1422	}
1423	*ptr = NULL;
1424	UTRACE(0, size, 0);
1425	return (ALLOCM_ERR_OOM);
1426}
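
/*
 * Caller-side sketch (flag macros are those of the experimental public
 * API): allocate at least 1000 zeroed bytes with 64-byte alignment and
 * retrieve the resulting usable size:
 *
 *	void *p;
 *	size_t rsize;
 *	if (allocm(&p, &rsize, 1000, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) !=
 *	    ALLOCM_SUCCESS)
 *		abort();
 */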
1427
1428JEMALLOC_ATTR(nonnull(1))
1429JEMALLOC_ATTR(visibility("default"))
1430int
1431je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
1432{
1433	void *p, *q;
1434	size_t usize;
1435	size_t old_size;
1436	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1437	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1438	    & (SIZE_T_MAX-1));
1439	bool zero = flags & ALLOCM_ZERO;
1440	bool no_move = flags & ALLOCM_NO_MOVE;
1441	prof_thr_cnt_t *cnt;
1442
1443	assert(ptr != NULL);
1444	assert(*ptr != NULL);
1445	assert(size != 0);
1446	assert(SIZE_T_MAX - size >= extra);
1447	assert(malloc_initialized || IS_INITIALIZER);
1448
1449	p = *ptr;
1450	if (config_prof && opt_prof) {
1451		/*
1452		 * usize isn't knowable before iralloc() returns when extra is
1453		 * non-zero.  Therefore, compute its maximum possible value and
1454		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
1455		 * backtrace.  prof_realloc() will use the actual usize to
1456		 * decide whether to sample.
1457		 */
1458		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1459		    sa2u(size+extra, alignment);
1460		prof_ctx_t *old_ctx = prof_ctx_get(p);
1461		old_size = isalloc(p, true);
1462		if (config_valgrind && opt_valgrind)
1463			old_rzsize = p2rz(p);
1464		PROF_ALLOC_PREP(1, max_usize, cnt);
1465		if (cnt == NULL)
1466			goto label_oom;
1467		/*
1468		 * Use minimum usize to determine whether promotion may happen.
1469		 */
1470		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1471		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1472		    <= SMALL_MAXCLASS) {
1473			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1474			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
1475			    alignment, zero, no_move);
1476			if (q == NULL)
1477				goto label_err;
1478			if (max_usize < PAGE) {
1479				usize = max_usize;
1480				arena_prof_promoted(q, usize);
1481			} else
1482				usize = isalloc(q, config_prof);
1483		} else {
1484			q = iralloc(p, size, extra, alignment, zero, no_move);
1485			if (q == NULL)
1486				goto label_err;
1487			usize = isalloc(q, config_prof);
1488		}
1489		prof_realloc(q, usize, cnt, old_size, old_ctx);
1490		if (rsize != NULL)
1491			*rsize = usize;
1492	} else {
1493		if (config_stats) {
1494			old_size = isalloc(p, false);
1495			if (config_valgrind && opt_valgrind)
1496				old_rzsize = u2rz(old_size);
1497		} else if (config_valgrind && opt_valgrind) {
1498			old_size = isalloc(p, false);
1499			old_rzsize = u2rz(old_size);
1500		}
1501		q = iralloc(p, size, extra, alignment, zero, no_move);
1502		if (q == NULL)
1503			goto label_err;
1504		if (config_stats)
1505			usize = isalloc(q, config_prof);
1506		if (rsize != NULL) {
1507			if (config_stats == false)
1508				usize = isalloc(q, config_prof);
1509			*rsize = usize;
1510		}
1511	}
1512
1513	*ptr = q;
1514	if (config_stats) {
1515		thread_allocated_t *ta;
1516		ta = thread_allocated_tsd_get();
1517		ta->allocated += usize;
1518		ta->deallocated += old_size;
1519	}
1520	UTRACE(p, size, q);
1521	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
1522	return (ALLOCM_SUCCESS);
1523label_err:
1524	if (no_move) {
1525		UTRACE(p, size, q);
1526		return (ALLOCM_ERR_NOT_MOVED);
1527	}
1528label_oom:
1529	if (config_xmalloc && opt_xmalloc) {
1530		malloc_write("<jemalloc>: Error in rallocm(): "
1531		    "out of memory\n");
1532		abort();
1533	}
1534	UTRACE(p, size, 0);
1535	return (ALLOCM_ERR_OOM);
1536}
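
/*
 * Caller-side sketch: attempt to grow an allocation in place, and only
 * fall back to a moving reallocation if that fails:
 *
 *	if (rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED)
 *		err = rallocm(&p, &rsize, newsize, 0, 0);
 */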
1537
1538JEMALLOC_ATTR(nonnull(1))
1539JEMALLOC_ATTR(visibility("default"))
1540int
1541je_sallocm(const void *ptr, size_t *rsize, int flags)
1542{
1543	size_t sz;
1544
1545	assert(malloc_initialized || IS_INITIALIZER);
1546
1547	if (config_ivsalloc)
1548		sz = ivsalloc(ptr, config_prof);
1549	else {
1550		assert(ptr != NULL);
1551		sz = isalloc(ptr, config_prof);
1552	}
1553	assert(rsize != NULL);
1554	*rsize = sz;
1555
1556	return (ALLOCM_SUCCESS);
1557}
1558
1559JEMALLOC_ATTR(nonnull(1))
1560JEMALLOC_ATTR(visibility("default"))
1561int
1562je_dallocm(void *ptr, int flags)
1563{
1564	size_t usize;
1565	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1566
1567	assert(ptr != NULL);
1568	assert(malloc_initialized || IS_INITIALIZER);
1569
1570	UTRACE(ptr, 0, 0);
1571	if (config_stats || config_valgrind)
1572		usize = isalloc(ptr, config_prof);
1573	if (config_prof && opt_prof) {
1574		if (config_stats == false && config_valgrind == false)
1575			usize = isalloc(ptr, config_prof);
1576		prof_free(ptr, usize);
1577	}
1578	if (config_stats)
1579		thread_allocated_tsd_get()->deallocated += usize;
1580	if (config_valgrind && opt_valgrind)
1581		rzsize = p2rz(ptr);
1582	iqalloc(ptr);
1583	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1584
1585	return (ALLOCM_SUCCESS);
1586}
1587
1588JEMALLOC_ATTR(visibility("default"))
1589int
1590je_nallocm(size_t *rsize, size_t size, int flags)
1591{
1592	size_t usize;
1593	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1594	    & (SIZE_T_MAX-1));
1595
1596	assert(size != 0);
1597
1598	if (malloc_init())
1599		return (ALLOCM_ERR_OOM);
1600
1601	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1602	if (usize == 0)
1603		return (ALLOCM_ERR_OOM);
1604
1605	if (rsize != NULL)
1606		*rsize = usize;
1607	return (ALLOCM_SUCCESS);
1608}
1609
1610#endif
1611/*
1612 * End experimental functions.
1613 */
1614/******************************************************************************/
1615/*
1616 * The following functions are used by threading libraries for protection of
1617 * malloc during fork().
1618 */
1619
1620#ifndef JEMALLOC_MUTEX_INIT_CB
1621void
1622jemalloc_prefork(void)
1623#else
1624JEMALLOC_ATTR(visibility("default"))
1625void
1626_malloc_prefork(void)
1627#endif
1628{
1629	unsigned i;
1630
1631	/* Acquire all mutexes in a safe order. */
1632	malloc_mutex_prefork(&arenas_lock);
1633	for (i = 0; i < narenas; i++) {
1634		if (arenas[i] != NULL)
1635			arena_prefork(arenas[i]);
1636	}
1637	base_prefork();
1638	huge_prefork();
1639	chunk_dss_prefork();
1640}
1641
1642#ifndef JEMALLOC_MUTEX_INIT_CB
1643void
1644jemalloc_postfork_parent(void)
1645#else
1646JEMALLOC_ATTR(visibility("default"))
1647void
1648_malloc_postfork(void)
1649#endif
1650{
1651	unsigned i;
1652
1653	/* Release all mutexes, now that fork() has completed. */
1654	chunk_dss_postfork_parent();
1655	huge_postfork_parent();
1656	base_postfork_parent();
1657	for (i = 0; i < narenas; i++) {
1658		if (arenas[i] != NULL)
1659			arena_postfork_parent(arenas[i]);
1660	}
1661	malloc_mutex_postfork_parent(&arenas_lock);
1662}
1663
1664void
1665jemalloc_postfork_child(void)
1666{
1667	unsigned i;
1668
1669	/* Release all mutexes, now that fork() has completed. */
1670	chunk_dss_postfork_child();
1671	huge_postfork_child();
1672	base_postfork_child();
1673	for (i = 0; i < narenas; i++) {
1674		if (arenas[i] != NULL)
1675			arena_postfork_child(arenas[i]);
1676	}
1677	malloc_mutex_postfork_child(&arenas_lock);
1678}
1679
1680/******************************************************************************/
1681/*
1682 * The following functions are used for TLS allocation/deallocation in static
1683 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
1684 * is that these avoid accessing TLS variables.
1685 */
1686
1687static void *
1688a0alloc(size_t size, bool zero)
1689{
1690
1691	if (malloc_init())
1692		return (NULL);
1693
1694	if (size == 0)
1695		size = 1;
1696
1697	if (size <= arena_maxclass)
1698		return (arena_malloc(arenas[0], size, zero, false));
1699	else
1700		return (huge_malloc(size, zero));
1701}
1702
1703void *
1704a0malloc(size_t size)
1705{
1706
1707	return (a0alloc(size, false));
1708}
1709
1710void *
1711a0calloc(size_t num, size_t size)
1712{
1713
1714	return (a0alloc(num * size, true));
1715}
1716
1717void
1718a0free(void *ptr)
1719{
1720	arena_chunk_t *chunk;
1721
1722	if (ptr == NULL)
1723		return;
1724
1725	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1726	if (chunk != ptr)
1727		arena_dalloc(chunk->arena, chunk, ptr, false);
1728	else
1729		huge_dalloc(ptr, true);
1730}
1731
1732/******************************************************************************/
1733