/* jemalloc.c revision 234658 */
1#define	JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,
9    THREAD_ALLOCATED_INITIALIZER)
10
11/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
12const char	*__malloc_options_1_0 = NULL;
13__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
14
15/* Runtime configuration options. */
16const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
17#ifdef JEMALLOC_DEBUG
18bool	opt_abort = true;
19#  ifdef JEMALLOC_FILL
20bool	opt_junk = true;
21#  else
22bool	opt_junk = false;
23#  endif
24#else
25bool	opt_abort = false;
26bool	opt_junk = false;
27#endif
28size_t	opt_quarantine = ZU(0);
29bool	opt_redzone = false;
30bool	opt_utrace = false;
31bool	opt_valgrind = false;
32bool	opt_xmalloc = false;
33bool	opt_zero = false;
34size_t	opt_narenas = 0;
35
36unsigned	ncpus;
37
38malloc_mutex_t		arenas_lock;
39arena_t			**arenas;
40unsigned		narenas;
41
42/* Set to true once the allocator has been initialized. */
43static bool		malloc_initialized = false;
44
45#ifdef JEMALLOC_THREADED_INIT
46/* Used to let the initializing thread recursively allocate. */
47#  define NO_INITIALIZER	((unsigned long)0)
48#  define INITIALIZER		pthread_self()
49#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
50static pthread_t		malloc_initializer = NO_INITIALIZER;
51#else
52#  define NO_INITIALIZER	false
53#  define INITIALIZER		true
54#  define IS_INITIALIZER	malloc_initializer
55static bool			malloc_initializer = NO_INITIALIZER;
56#endif
57
58/* Used to avoid initialization races. */
59static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
60
61typedef struct {
62	void	*p;	/* Input pointer (as in realloc(p, s)). */
63	size_t	s;	/* Request size. */
64	void	*r;	/* Result pointer. */
65} malloc_utrace_t;
66
67#ifdef JEMALLOC_UTRACE
68#  define UTRACE(a, b, c) do {						\
69	if (opt_utrace) {						\
70		malloc_utrace_t ut;					\
71		ut.p = (a);						\
72		ut.s = (b);						\
73		ut.r = (c);						\
74		utrace(&ut, sizeof(ut));				\
75	}								\
76} while (0)
77#else
78#  define UTRACE(a, b, c)
79#endif
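
/*
 * For example, with opt_utrace enabled a realloc(p, 4096) that returns r is
 * recorded as {p, 4096, r}, malloc(size) as {NULL, size, result}, and
 * free(ptr) as {ptr, 0, NULL}; on FreeBSD such utrace(2) records can
 * typically be examined with ktrace(1)/kdump(1).
 */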
80
81/******************************************************************************/
82/* Function prototypes for non-inline static functions. */
83
84static void	stats_print_atexit(void);
85static unsigned	malloc_ncpus(void);
86static bool	malloc_conf_next(char const **opts_p, char const **k_p,
87    size_t *klen_p, char const **v_p, size_t *vlen_p);
88static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
89    const char *v, size_t vlen);
90static void	malloc_conf_init(void);
91static bool	malloc_init_hard(void);
92static int	imemalign(void **memptr, size_t alignment, size_t size,
93    size_t min_alignment);
94
95/******************************************************************************/
96/*
97 * Begin miscellaneous support functions.
98 */
99
100/* Create a new arena and insert it into the arenas array at index ind. */
101arena_t *
102arenas_extend(unsigned ind)
103{
104	arena_t *ret;
105
106	ret = (arena_t *)base_alloc(sizeof(arena_t));
107	if (ret != NULL && arena_new(ret, ind) == false) {
108		arenas[ind] = ret;
109		return (ret);
110	}
111	/* Only reached if there is an OOM error. */
112
113	/*
114	 * OOM here is quite inconvenient to propagate, since dealing with it
115	 * would require a check for failure in the fast path.  Instead, punt
116	 * by using arenas[0].  In practice, this is an extremely unlikely
117	 * failure.
118	 */
119	malloc_write("<jemalloc>: Error initializing arena\n");
120	if (opt_abort)
121		abort();
122
123	return (arenas[0]);
124}
125
126/* Slow path, called only by choose_arena(). */
127arena_t *
128choose_arena_hard(void)
129{
130	arena_t *ret;
131
132	if (narenas > 1) {
133		unsigned i, choose, first_null;
134
135		choose = 0;
136		first_null = narenas;
137		malloc_mutex_lock(&arenas_lock);
138		assert(arenas[0] != NULL);
139		for (i = 1; i < narenas; i++) {
140			if (arenas[i] != NULL) {
141				/*
142				 * Choose the first arena that has the lowest
143				 * number of threads assigned to it.
144				 */
145				if (arenas[i]->nthreads <
146				    arenas[choose]->nthreads)
147					choose = i;
148			} else if (first_null == narenas) {
149				/*
150				 * Record the index of the first uninitialized
151				 * arena, in case all extant arenas are in use.
152				 *
153				 * NB: It is possible for there to be
154				 * discontinuities in terms of initialized
155				 * versus uninitialized arenas, due to the
156				 * "thread.arena" mallctl.
157				 */
158				first_null = i;
159			}
160		}
161
162		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
163			/*
164			 * Use an unloaded arena, or the least loaded arena if
165			 * all arenas are already initialized.
166			 */
167			ret = arenas[choose];
168		} else {
169			/* Initialize a new arena. */
170			ret = arenas_extend(first_null);
171		}
172		ret->nthreads++;
173		malloc_mutex_unlock(&arenas_lock);
174	} else {
175		ret = arenas[0];
176		malloc_mutex_lock(&arenas_lock);
177		ret->nthreads++;
178		malloc_mutex_unlock(&arenas_lock);
179	}
180
181	arenas_tsd_set(&ret);
182
183	return (ret);
184}
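
/*
 * Example: with narenas == 4, thread counts {3, 1, 2} in slots 0..2, and
 * slot 3 still NULL, the scan above leaves choose == 1 and first_null == 3;
 * because arenas[1] already has threads bound to it and an empty slot
 * remains, arenas_extend(3) is called and the new arena is assigned to the
 * calling thread.
 */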
185
186static void
187stats_print_atexit(void)
188{
189
190	if (config_tcache && config_stats) {
191		unsigned i;
192
193		/*
194		 * Merge stats from extant threads.  This is racy, since
195		 * individual threads do not lock when recording tcache stats
196		 * events.  As a consequence, the final stats may be slightly
197		 * out of date by the time they are reported, if other threads
198		 * continue to allocate.
199		 */
200		for (i = 0; i < narenas; i++) {
201			arena_t *arena = arenas[i];
202			if (arena != NULL) {
203				tcache_t *tcache;
204
205				/*
206				 * tcache_stats_merge() locks bins, so if any
207				 * code is introduced that acquires both arena
208				 * and bin locks in the opposite order,
209				 * deadlocks may result.
210				 */
211				malloc_mutex_lock(&arena->lock);
212				ql_foreach(tcache, &arena->tcache_ql, link) {
213					tcache_stats_merge(tcache, arena);
214				}
215				malloc_mutex_unlock(&arena->lock);
216			}
217		}
218	}
219	je_malloc_stats_print(NULL, NULL, NULL);
220}
221
222/*
223 * End miscellaneous support functions.
224 */
225/******************************************************************************/
226/*
227 * Begin initialization functions.
228 */
229
230static unsigned
231malloc_ncpus(void)
232{
233	unsigned ret;
234	long result;
235
236	result = sysconf(_SC_NPROCESSORS_ONLN);
237	if (result == -1) {
238		/* Error; fall back to a single CPU. */
239		ret = 1;
240	} else
241	ret = (unsigned)result;
242
243	return (ret);
244}
245
246void
247arenas_cleanup(void *arg)
248{
249	arena_t *arena = *(arena_t **)arg;
250
251	malloc_mutex_lock(&arenas_lock);
252	arena->nthreads--;
253	malloc_mutex_unlock(&arenas_lock);
254}
255
256static inline bool
257malloc_init(void)
258{
259
260	if (malloc_initialized == false)
261		return (malloc_init_hard());
262
263	return (false);
264}
265
266static bool
267malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
268    char const **v_p, size_t *vlen_p)
269{
270	bool accept;
271	const char *opts = *opts_p;
272
273	*k_p = opts;
274
275	for (accept = false; accept == false;) {
276		switch (*opts) {
277		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
278		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
279		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
280		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
281		case 'Y': case 'Z':
282		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
283		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
284		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
285		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
286		case 'y': case 'z':
287		case '0': case '1': case '2': case '3': case '4': case '5':
288		case '6': case '7': case '8': case '9':
289		case '_':
290			opts++;
291			break;
292		case ':':
293			opts++;
294			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
295			*v_p = opts;
296			accept = true;
297			break;
298		case '\0':
299			if (opts != *opts_p) {
300				malloc_write("<jemalloc>: Conf string ends "
301				    "with key\n");
302			}
303			return (true);
304		default:
305			malloc_write("<jemalloc>: Malformed conf string\n");
306			return (true);
307		}
308	}
309
310	for (accept = false; accept == false;) {
311		switch (*opts) {
312		case ',':
313			opts++;
314			/*
315			 * Look ahead one character here, because the next time
316			 * this function is called, it will assume that end of
317			 * input has been cleanly reached if no input remains,
318			 * but we have optimistically already consumed the
319			 * comma if one exists.
320			 */
321			if (*opts == '\0') {
322				malloc_write("<jemalloc>: Conf string ends "
323				    "with comma\n");
324			}
325			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
326			accept = true;
327			break;
328		case '\0':
329			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
330			accept = true;
331			break;
332		default:
333			opts++;
334			break;
335		}
336	}
337
338	*opts_p = opts;
339	return (false);
340}
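
/*
 * For example (hypothetical input), parsing "junk:true,narenas:4" yields
 * (k="junk", v="true") on the first call and (k="narenas", v="4") on the
 * second, with *opts_p advanced past the consumed pair each time; a string
 * that ends in the middle of a key, or immediately after a comma, elicits a
 * warning.
 */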
341
342static void
343malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
344    size_t vlen)
345{
346
347	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
348	    (int)vlen, v);
349}
350
351static void
352malloc_conf_init(void)
353{
354	unsigned i;
355	char buf[PATH_MAX + 1];
356	const char *opts, *k, *v;
357	size_t klen, vlen;
358
359	for (i = 0; i < 3; i++) {
360		/* Get runtime configuration. */
361		switch (i) {
362		case 0:
363			if (je_malloc_conf != NULL) {
364				/*
365				 * Use options that were compiled into the
366				 * program.
367				 */
368				opts = je_malloc_conf;
369			} else {
370				/* No configuration specified. */
371				buf[0] = '\0';
372				opts = buf;
373			}
374			break;
375		case 1: {
376			int linklen;
377			const char *linkname =
378#ifdef JEMALLOC_PREFIX
379			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
380#else
381			    "/etc/malloc.conf"
382#endif
383			    ;
384
385			if ((linklen = readlink(linkname, buf,
386			    sizeof(buf) - 1)) != -1) {
387				/*
388				 * Use the contents of the "/etc/malloc.conf"
389				 * symbolic link's name.
390				 */
391				buf[linklen] = '\0';
392				opts = buf;
393			} else {
394				/* No configuration specified. */
395				buf[0] = '\0';
396				opts = buf;
397			}
398			break;
399		} case 2: {
400			const char *envname =
401#ifdef JEMALLOC_PREFIX
402			    JEMALLOC_CPREFIX"MALLOC_CONF"
403#else
404			    "MALLOC_CONF"
405#endif
406			    ;
407
408			if (issetugid() == 0 && (opts = getenv(envname)) !=
409			    NULL) {
410				/*
411				 * Do nothing; opts is already initialized to
412				 * the value of the MALLOC_CONF environment
413				 * variable.
414				 */
415			} else {
416				/* No configuration specified. */
417				buf[0] = '\0';
418				opts = buf;
419			}
420			break;
421		} default:
422			/* NOTREACHED */
423			assert(false);
424			buf[0] = '\0';
425			opts = buf;
426		}
427
428		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
429		    &vlen) == false) {
430#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
431			if (sizeof(n)-1 == klen && strncmp(n, k,	\
432			    klen) == 0) {				\
433				if (strncmp("true", v, vlen) == 0 &&	\
434				    vlen == sizeof("true")-1)		\
435					o = true;			\
436				else if (strncmp("false", v, vlen) ==	\
437				    0 && vlen == sizeof("false")-1)	\
438					o = false;			\
439				else {					\
440					malloc_conf_error(		\
441					    "Invalid conf value",	\
442					    k, klen, v, vlen);		\
443				}					\
444				hit = true;				\
445			} else						\
446				hit = false;
447#define	CONF_HANDLE_BOOL(o, n) {					\
448			bool hit;					\
449			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
450			if (hit)					\
451				continue;				\
452}
453#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
454			if (sizeof(n)-1 == klen && strncmp(n, k,	\
455			    klen) == 0) {				\
456				uintmax_t um;				\
457				char *end;				\
458									\
459				errno = 0;				\
460				um = malloc_strtoumax(v, &end, 0);	\
461				if (errno != 0 || (uintptr_t)end -	\
462				    (uintptr_t)v != vlen) {		\
463					malloc_conf_error(		\
464					    "Invalid conf value",	\
465					    k, klen, v, vlen);		\
466				} else if (um < min || um > max) {	\
467					malloc_conf_error(		\
468					    "Out-of-range conf value",	\
469					    k, klen, v, vlen);		\
470				} else					\
471					o = um;				\
472				continue;				\
473			}
474#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
475			if (sizeof(n)-1 == klen && strncmp(n, k,	\
476			    klen) == 0) {				\
477				long l;					\
478				char *end;				\
479									\
480				errno = 0;				\
481				l = strtol(v, &end, 0);			\
482				if (errno != 0 || (uintptr_t)end -	\
483				    (uintptr_t)v != vlen) {		\
484					malloc_conf_error(		\
485					    "Invalid conf value",	\
486					    k, klen, v, vlen);		\
487				} else if (l < (ssize_t)min || l >	\
488				    (ssize_t)max) {			\
489					malloc_conf_error(		\
490					    "Out-of-range conf value",	\
491					    k, klen, v, vlen);		\
492				} else					\
493					o = l;				\
494				continue;				\
495			}
496#define	CONF_HANDLE_CHAR_P(o, n, d)					\
497			if (sizeof(n)-1 == klen && strncmp(n, k,	\
498			    klen) == 0) {				\
499				size_t cpylen = (vlen <=		\
500				    sizeof(o)-1) ? vlen :		\
501				    sizeof(o)-1;			\
502				strncpy(o, v, cpylen);			\
503				o[cpylen] = '\0';			\
504				continue;				\
505			}
506
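			/*
			 * For example, CONF_HANDLE_BOOL(opt_abort, "abort")
			 * expands to a key comparison against "abort" and, on
			 * a match, parses the value as "true"/"false" into
			 * opt_abort before continuing with the next key/value
			 * pair; unmatched pairs fall through to the
			 * "Invalid conf pair" error below.
			 */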
507			CONF_HANDLE_BOOL(opt_abort, "abort")
508			/*
509			 * Chunks always require at least one header page, plus
510			 * one data page in the absence of redzones, or three
511			 * pages in the presence of redzones.  In order to
512			 * simplify options processing, fix the limit based on
513			 * config_fill.
514			 */
515			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
516			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
517			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
518			    SIZE_T_MAX)
519			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
520			    -1, (sizeof(size_t) << 3) - 1)
521			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
522			if (config_fill) {
523				CONF_HANDLE_BOOL(opt_junk, "junk")
524				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
525				    0, SIZE_T_MAX)
526				CONF_HANDLE_BOOL(opt_redzone, "redzone")
527				CONF_HANDLE_BOOL(opt_zero, "zero")
528			}
529			if (config_utrace) {
530				CONF_HANDLE_BOOL(opt_utrace, "utrace")
531			}
532			if (config_valgrind) {
533				bool hit;
534				CONF_HANDLE_BOOL_HIT(opt_valgrind,
535				    "valgrind", hit)
536				if (config_fill && opt_valgrind && hit) {
537					opt_junk = false;
538					opt_zero = false;
539					if (opt_quarantine == 0) {
540						opt_quarantine =
541						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
542					}
543					opt_redzone = true;
544				}
545				if (hit)
546					continue;
547			}
548			if (config_xmalloc) {
549				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
550			}
551			if (config_tcache) {
552				CONF_HANDLE_BOOL(opt_tcache, "tcache")
553				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
554				    "lg_tcache_max", -1,
555				    (sizeof(size_t) << 3) - 1)
556			}
557			if (config_prof) {
558				CONF_HANDLE_BOOL(opt_prof, "prof")
559				CONF_HANDLE_CHAR_P(opt_prof_prefix,
560				    "prof_prefix", "jeprof")
561				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
562				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
563				    "lg_prof_sample", 0,
564				    (sizeof(uint64_t) << 3) - 1)
565				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
566				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
567				    "lg_prof_interval", -1,
568				    (sizeof(uint64_t) << 3) - 1)
569				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
570				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
571				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
572			}
573			malloc_conf_error("Invalid conf pair", k, klen, v,
574			    vlen);
575#undef CONF_HANDLE_BOOL
576#undef CONF_HANDLE_SIZE_T
577#undef CONF_HANDLE_SSIZE_T
578#undef CONF_HANDLE_CHAR_P
579		}
580	}
581}
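
/*
 * For example (assuming an unprefixed build), equivalent settings could be
 * supplied through any of the three sources probed above:
 *
 *     const char *malloc_conf = "narenas:2,lg_chunk:24";    (compiled in)
 *     ln -s 'stats_print:true' /etc/malloc.conf             (symlink name)
 *     MALLOC_CONF="junk:true,quarantine:1048576" ./a.out    (environment)
 *
 * Sources are processed in the order above, so the environment overrides the
 * symlink, which overrides the compiled-in default, for any key that appears
 * more than once.
 */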
582
583static bool
584malloc_init_hard(void)
585{
586	arena_t *init_arenas[1];
587
588	malloc_mutex_lock(&init_lock);
589	if (malloc_initialized || IS_INITIALIZER) {
590		/*
591		 * Another thread initialized the allocator before this one
592		 * acquired init_lock, or this thread is the initializing
593		 * thread, and it is recursively allocating.
594		 */
595		malloc_mutex_unlock(&init_lock);
596		return (false);
597	}
598#ifdef JEMALLOC_THREADED_INIT
599	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
600		/* Busy-wait until the initializing thread completes. */
601		do {
602			malloc_mutex_unlock(&init_lock);
603			CPU_SPINWAIT;
604			malloc_mutex_lock(&init_lock);
605		} while (malloc_initialized == false);
606		malloc_mutex_unlock(&init_lock);
607		return (false);
608	}
609#endif
610	malloc_initializer = INITIALIZER;
611
612	malloc_tsd_boot();
613	if (config_prof)
614		prof_boot0();
615
616	malloc_conf_init();
617
618#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
619	/* Register fork handlers. */
620	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
621	    jemalloc_postfork_child) != 0) {
622		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
623		if (opt_abort)
624			abort();
625	}
626#endif
627
628	if (opt_stats_print) {
629		/* Print statistics at exit. */
630		if (atexit(stats_print_atexit) != 0) {
631			malloc_write("<jemalloc>: Error in atexit()\n");
632			if (opt_abort)
633				abort();
634		}
635	}
636
637	if (base_boot()) {
638		malloc_mutex_unlock(&init_lock);
639		return (true);
640	}
641
642	if (chunk_boot()) {
643		malloc_mutex_unlock(&init_lock);
644		return (true);
645	}
646
647	if (ctl_boot()) {
648		malloc_mutex_unlock(&init_lock);
649		return (true);
650	}
651
652	if (config_prof)
653		prof_boot1();
654
655	arena_boot();
656
657	if (config_tcache && tcache_boot0()) {
658		malloc_mutex_unlock(&init_lock);
659		return (true);
660	}
661
662	if (huge_boot()) {
663		malloc_mutex_unlock(&init_lock);
664		return (true);
665	}
666
667	if (malloc_mutex_init(&arenas_lock))
668		return (true);
669
670	/*
671	 * Create enough scaffolding to allow recursive allocation in
672	 * malloc_ncpus().
673	 */
674	narenas = 1;
675	arenas = init_arenas;
676	memset(arenas, 0, sizeof(arena_t *) * narenas);
677
678	/*
679	 * Initialize one arena here.  The rest are lazily created in
680	 * choose_arena_hard().
681	 */
682	arenas_extend(0);
683	if (arenas[0] == NULL) {
684		malloc_mutex_unlock(&init_lock);
685		return (true);
686	}
687
688	/* Initialize allocation counters before any allocations can occur. */
689	if (config_stats && thread_allocated_tsd_boot()) {
690		malloc_mutex_unlock(&init_lock);
691		return (true);
692	}
693
694	if (arenas_tsd_boot()) {
695		malloc_mutex_unlock(&init_lock);
696		return (true);
697	}
698
699	if (config_tcache && tcache_boot1()) {
700		malloc_mutex_unlock(&init_lock);
701		return (true);
702	}
703
704	if (config_fill && quarantine_boot()) {
705		malloc_mutex_unlock(&init_lock);
706		return (true);
707	}
708
709	if (config_prof && prof_boot2()) {
710		malloc_mutex_unlock(&init_lock);
711		return (true);
712	}
713
714	/* Get number of CPUs. */
715	malloc_mutex_unlock(&init_lock);
716	ncpus = malloc_ncpus();
717	malloc_mutex_lock(&init_lock);
718
719	if (mutex_boot()) {
720		malloc_mutex_unlock(&init_lock);
721		return (true);
722	}
723
724	if (opt_narenas == 0) {
725		/*
726		 * For SMP systems, create more than one arena per CPU by
727		 * default.
728		 */
729		if (ncpus > 1)
730			opt_narenas = ncpus << 2;
731		else
732			opt_narenas = 1;
733	}
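	/* For example, an 8-CPU system would default to 8 << 2 == 32 arenas. */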
734	narenas = opt_narenas;
735	/*
736	 * Make sure that the arenas array can be allocated.  In practice, this
737	 * limit is enough to allow the allocator to function, but the ctl
738	 * machinery will fail to allocate memory at far lower limits.
739	 */
740	if (narenas > chunksize / sizeof(arena_t *)) {
741		narenas = chunksize / sizeof(arena_t *);
742		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
743		    narenas);
744	}
745
746	/* Allocate and initialize arenas. */
747	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
748	if (arenas == NULL) {
749		malloc_mutex_unlock(&init_lock);
750		return (true);
751	}
752	/*
753	 * Zero the array.  In practice, this should always be pre-zeroed,
754	 * since it was just mmap()ed, but let's be sure.
755	 */
756	memset(arenas, 0, sizeof(arena_t *) * narenas);
757	/* Copy the pointer to the one arena that was already initialized. */
758	arenas[0] = init_arenas[0];
759
760	malloc_initialized = true;
761	malloc_mutex_unlock(&init_lock);
762	return (false);
763}
764
765/*
766 * End initialization functions.
767 */
768/******************************************************************************/
769/*
770 * Begin malloc(3)-compatible functions.
771 */
772
773JEMALLOC_ATTR(malloc)
774JEMALLOC_ATTR(visibility("default"))
775void *
776je_malloc(size_t size)
777{
778	void *ret;
779	size_t usize;
780	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
781
782	if (malloc_init()) {
783		ret = NULL;
784		goto label_oom;
785	}
786
787	if (size == 0)
788		size = 1;
789
790	if (config_prof && opt_prof) {
791		usize = s2u(size);
792		PROF_ALLOC_PREP(1, usize, cnt);
793		if (cnt == NULL) {
794			ret = NULL;
795			goto label_oom;
796		}
797		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
798		    SMALL_MAXCLASS) {
799			ret = imalloc(SMALL_MAXCLASS+1);
800			if (ret != NULL)
801				arena_prof_promoted(ret, usize);
802		} else
803			ret = imalloc(size);
804	} else {
805		if (config_stats || (config_valgrind && opt_valgrind))
806			usize = s2u(size);
807		ret = imalloc(size);
808	}
809
810label_oom:
811	if (ret == NULL) {
812		if (config_xmalloc && opt_xmalloc) {
813			malloc_write("<jemalloc>: Error in malloc(): "
814			    "out of memory\n");
815			abort();
816		}
817		errno = ENOMEM;
818	}
819	if (config_prof && opt_prof && ret != NULL)
820		prof_malloc(ret, usize, cnt);
821	if (config_stats && ret != NULL) {
822		assert(usize == isalloc(ret, config_prof));
823		thread_allocated_tsd_get()->allocated += usize;
824	}
825	UTRACE(0, size, ret);
826	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
827	return (ret);
828}
829
830JEMALLOC_ATTR(nonnull(1))
831#ifdef JEMALLOC_PROF
832/*
833 * Avoid any uncertainty as to how many backtrace frames to ignore in
834 * PROF_ALLOC_PREP().
835 */
836JEMALLOC_ATTR(noinline)
837#endif
838static int
839imemalign(void **memptr, size_t alignment, size_t size,
840    size_t min_alignment)
841{
842	int ret;
843	size_t usize;
844	void *result;
845	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
846
847	assert(min_alignment != 0);
848
849	if (malloc_init())
850		result = NULL;
851	else {
852		if (size == 0)
853			size = 1;
854
855		/* Make sure that alignment is a large enough power of 2. */
856		if (((alignment - 1) & alignment) != 0
857		    || (alignment < min_alignment)) {
858			if (config_xmalloc && opt_xmalloc) {
859				malloc_write("<jemalloc>: Error allocating "
860				    "aligned memory: invalid alignment\n");
861				abort();
862			}
863			result = NULL;
864			ret = EINVAL;
865			goto label_return;
866		}
867
868		usize = sa2u(size, alignment);
869		if (usize == 0) {
870			result = NULL;
871			ret = ENOMEM;
872			goto label_return;
873		}
874
875		if (config_prof && opt_prof) {
876			PROF_ALLOC_PREP(2, usize, cnt);
877			if (cnt == NULL) {
878				result = NULL;
879				ret = EINVAL;
880			} else {
881				if (prof_promote && (uintptr_t)cnt !=
882				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
883					assert(sa2u(SMALL_MAXCLASS+1,
884					    alignment) != 0);
885					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
886					    alignment), alignment, false);
887					if (result != NULL) {
888						arena_prof_promoted(result,
889						    usize);
890					}
891				} else {
892					result = ipalloc(usize, alignment,
893					    false);
894				}
895			}
896		} else
897			result = ipalloc(usize, alignment, false);
898	}
899
900	if (result == NULL) {
901		if (config_xmalloc && opt_xmalloc) {
902			malloc_write("<jemalloc>: Error allocating aligned "
903			    "memory: out of memory\n");
904			abort();
905		}
906		ret = ENOMEM;
907		goto label_return;
908	}
909
910	*memptr = result;
911	ret = 0;
912
913label_return:
914	if (config_stats && result != NULL) {
915		assert(usize == isalloc(result, config_prof));
916		thread_allocated_tsd_get()->allocated += usize;
917	}
918	if (config_prof && opt_prof && result != NULL)
919		prof_malloc(result, usize, cnt);
920	UTRACE(0, size, result);
921	return (ret);
922}
923
924JEMALLOC_ATTR(nonnull(1))
925JEMALLOC_ATTR(visibility("default"))
926int
927je_posix_memalign(void **memptr, size_t alignment, size_t size)
928{
929	int ret = imemalign(memptr, alignment, size, sizeof(void *));
930	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
931	    config_prof), false);
932	return (ret);
933}
934
935JEMALLOC_ATTR(malloc)
936JEMALLOC_ATTR(visibility("default"))
937void *
938je_aligned_alloc(size_t alignment, size_t size)
939{
940	void *ret;
941	int err;
942
943	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
944		ret = NULL;
945		errno = err;
946	}
947	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
948	    false);
949	return (ret);
950}
951
952JEMALLOC_ATTR(malloc)
953JEMALLOC_ATTR(visibility("default"))
954void *
955je_calloc(size_t num, size_t size)
956{
957	void *ret;
958	size_t num_size;
959	size_t usize;
960	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
961
962	if (malloc_init()) {
963		num_size = 0;
964		ret = NULL;
965		goto label_return;
966	}
967
968	num_size = num * size;
969	if (num_size == 0) {
970		if (num == 0 || size == 0)
971			num_size = 1;
972		else {
973			ret = NULL;
974			goto label_return;
975		}
976	/*
977	 * Try to avoid division here.  We know that it isn't possible to
978	 * overflow during multiplication if neither operand uses any of the
979	 * most significant half of the bits in a size_t.
980	 */
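	/*
	 * For example, with a 64-bit size_t, any num and size that each fit
	 * in 32 bits cannot overflow when multiplied, so the division check
	 * below only runs when one of the high halves is non-zero.
	 */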
981	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
982	    && (num_size / size != num)) {
983		/* size_t overflow. */
984		ret = NULL;
985		goto label_return;
986	}
987
988	if (config_prof && opt_prof) {
989		usize = s2u(num_size);
990		PROF_ALLOC_PREP(1, usize, cnt);
991		if (cnt == NULL) {
992			ret = NULL;
993			goto label_return;
994		}
995		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
996		    <= SMALL_MAXCLASS) {
997			ret = icalloc(SMALL_MAXCLASS+1);
998			if (ret != NULL)
999				arena_prof_promoted(ret, usize);
1000		} else
1001			ret = icalloc(num_size);
1002	} else {
1003		if (config_stats || (config_valgrind && opt_valgrind))
1004			usize = s2u(num_size);
1005		ret = icalloc(num_size);
1006	}
1007
1008label_return:
1009	if (ret == NULL) {
1010		if (config_xmalloc && opt_xmalloc) {
1011			malloc_write("<jemalloc>: Error in calloc(): out of "
1012			    "memory\n");
1013			abort();
1014		}
1015		errno = ENOMEM;
1016	}
1017
1018	if (config_prof && opt_prof && ret != NULL)
1019		prof_malloc(ret, usize, cnt);
1020	if (config_stats && ret != NULL) {
1021		assert(usize == isalloc(ret, config_prof));
1022		thread_allocated_tsd_get()->allocated += usize;
1023	}
1024	UTRACE(0, num_size, ret);
1025	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1026	return (ret);
1027}
1028
1029JEMALLOC_ATTR(visibility("default"))
1030void *
1031je_realloc(void *ptr, size_t size)
1032{
1033	void *ret;
1034	size_t usize;
1035	size_t old_size = 0;
1036	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1037	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1038	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
1039
1040	if (size == 0) {
1041		if (ptr != NULL) {
1042			/* realloc(ptr, 0) is equivalent to free(ptr). */
1043			if (config_prof) {
1044				old_size = isalloc(ptr, true);
1045				if (config_valgrind && opt_valgrind)
1046					old_rzsize = p2rz(ptr);
1047			} else if (config_stats) {
1048				old_size = isalloc(ptr, false);
1049				if (config_valgrind && opt_valgrind)
1050					old_rzsize = u2rz(old_size);
1051			} else if (config_valgrind && opt_valgrind) {
1052				old_size = isalloc(ptr, false);
1053				old_rzsize = u2rz(old_size);
1054			}
1055			if (config_prof && opt_prof) {
1056				old_ctx = prof_ctx_get(ptr);
1057				cnt = NULL;
1058			}
1059			iqalloc(ptr);
1060			ret = NULL;
1061			goto label_return;
1062		} else
1063			size = 1;
1064	}
1065
1066	if (ptr != NULL) {
1067		assert(malloc_initialized || IS_INITIALIZER);
1068
1069		if (config_prof) {
1070			old_size = isalloc(ptr, true);
1071			if (config_valgrind && opt_valgrind)
1072				old_rzsize = p2rz(ptr);
1073		} else if (config_stats) {
1074			old_size = isalloc(ptr, false);
1075			if (config_valgrind && opt_valgrind)
1076				old_rzsize = u2rz(old_size);
1077		} else if (config_valgrind && opt_valgrind) {
1078			old_size = isalloc(ptr, false);
1079			old_rzsize = u2rz(old_size);
1080		}
1081		if (config_prof && opt_prof) {
1082			usize = s2u(size);
1083			old_ctx = prof_ctx_get(ptr);
1084			PROF_ALLOC_PREP(1, usize, cnt);
1085			if (cnt == NULL) {
1086				old_ctx = NULL;
1087				ret = NULL;
1088				goto label_oom;
1089			}
1090			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
1091			    usize <= SMALL_MAXCLASS) {
1092				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
1093				    false, false);
1094				if (ret != NULL)
1095					arena_prof_promoted(ret, usize);
1096				else
1097					old_ctx = NULL;
1098			} else {
1099				ret = iralloc(ptr, size, 0, 0, false, false);
1100				if (ret == NULL)
1101					old_ctx = NULL;
1102			}
1103		} else {
1104			if (config_stats || (config_valgrind && opt_valgrind))
1105				usize = s2u(size);
1106			ret = iralloc(ptr, size, 0, 0, false, false);
1107		}
1108
1109label_oom:
1110		if (ret == NULL) {
1111			if (config_xmalloc && opt_xmalloc) {
1112				malloc_write("<jemalloc>: Error in realloc(): "
1113				    "out of memory\n");
1114				abort();
1115			}
1116			errno = ENOMEM;
1117		}
1118	} else {
1119		/* realloc(NULL, size) is equivalent to malloc(size). */
1120		if (config_prof && opt_prof)
1121			old_ctx = NULL;
1122		if (malloc_init()) {
1123			if (config_prof && opt_prof)
1124				cnt = NULL;
1125			ret = NULL;
1126		} else {
1127			if (config_prof && opt_prof) {
1128				usize = s2u(size);
1129				PROF_ALLOC_PREP(1, usize, cnt);
1130				if (cnt == NULL)
1131					ret = NULL;
1132				else {
1133					if (prof_promote && (uintptr_t)cnt !=
1134					    (uintptr_t)1U && usize <=
1135					    SMALL_MAXCLASS) {
1136						ret = imalloc(SMALL_MAXCLASS+1);
1137						if (ret != NULL) {
1138							arena_prof_promoted(ret,
1139							    usize);
1140						}
1141					} else
1142						ret = imalloc(size);
1143				}
1144			} else {
1145				if (config_stats || (config_valgrind &&
1146				    opt_valgrind))
1147					usize = s2u(size);
1148				ret = imalloc(size);
1149			}
1150		}
1151
1152		if (ret == NULL) {
1153			if (config_xmalloc && opt_xmalloc) {
1154				malloc_write("<jemalloc>: Error in realloc(): "
1155				    "out of memory\n");
1156				abort();
1157			}
1158			errno = ENOMEM;
1159		}
1160	}
1161
1162label_return:
1163	if (config_prof && opt_prof)
1164		prof_realloc(ret, usize, cnt, old_size, old_ctx);
1165	if (config_stats && ret != NULL) {
1166		thread_allocated_t *ta;
1167		assert(usize == isalloc(ret, config_prof));
1168		ta = thread_allocated_tsd_get();
1169		ta->allocated += usize;
1170		ta->deallocated += old_size;
1171	}
1172	UTRACE(ptr, size, ret);
1173	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
1174	return (ret);
1175}
1176
1177JEMALLOC_ATTR(visibility("default"))
1178void
1179je_free(void *ptr)
1180{
1181
1182	UTRACE(ptr, 0, 0);
1183	if (ptr != NULL) {
1184		size_t usize;
1185		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1186
1187		assert(malloc_initialized || IS_INITIALIZER);
1188
1189		if (config_prof && opt_prof) {
1190			usize = isalloc(ptr, config_prof);
1191			prof_free(ptr, usize);
1192		} else if (config_stats || config_valgrind)
1193			usize = isalloc(ptr, config_prof);
1194		if (config_stats)
1195			thread_allocated_tsd_get()->deallocated += usize;
1196		if (config_valgrind && opt_valgrind)
1197			rzsize = p2rz(ptr);
1198		iqalloc(ptr);
1199		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1200	}
1201}
1202
1203/*
1204 * End malloc(3)-compatible functions.
1205 */
1206/******************************************************************************/
1207/*
1208 * Begin non-standard override functions.
1209 */
1210
1211#ifdef JEMALLOC_OVERRIDE_MEMALIGN
1212JEMALLOC_ATTR(malloc)
1213JEMALLOC_ATTR(visibility("default"))
1214void *
1215je_memalign(size_t alignment, size_t size)
1216{
1217	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1218	imemalign(&ret, alignment, size, 1);
1219	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1220	return (ret);
1221}
1222#endif
1223
1224#ifdef JEMALLOC_OVERRIDE_VALLOC
1225JEMALLOC_ATTR(malloc)
1226JEMALLOC_ATTR(visibility("default"))
1227void *
1228je_valloc(size_t size)
1229{
1230	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1231	imemalign(&ret, PAGE, size, 1);
1232	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1233	return (ret);
1234}
1235#endif
1236
1237/*
1238 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1239 * #define je_malloc malloc
1240 */
1241#define	malloc_is_malloc 1
1242#define	is_malloc_(a) malloc_is_ ## a
1243#define	is_malloc(a) is_malloc_(a)
1244
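/*
 * For example, when jemalloc_defs.h maps je_malloc to malloc,
 * is_malloc(je_malloc) expands via is_malloc_(malloc) to malloc_is_malloc,
 * which is defined to 1 above; otherwise it expands to the undefined
 * identifier malloc_is_je_malloc and the #if below evaluates to 0.
 */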
1245#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
1246/*
1247 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1248 * to inconsistently reference libc's malloc(3)-compatible functions
1249 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1250 *
1251 * These definitions interpose hooks in glibc.  The functions are actually
1252 * passed an extra argument for the caller return address, which will be
1253 * ignored.
1254 */
1255JEMALLOC_ATTR(visibility("default"))
1256void (* const __free_hook)(void *ptr) = je_free;
1257
1258JEMALLOC_ATTR(visibility("default"))
1259void *(* const __malloc_hook)(size_t size) = je_malloc;
1260
1261JEMALLOC_ATTR(visibility("default"))
1262void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
1263
1264JEMALLOC_ATTR(visibility("default"))
1265void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
1266#endif
1267
1268/*
1269 * End non-standard override functions.
1270 */
1271/******************************************************************************/
1272/*
1273 * Begin non-standard functions.
1274 */
1275
1276JEMALLOC_ATTR(visibility("default"))
1277size_t
1278je_malloc_usable_size(const void *ptr)
1279{
1280	size_t ret;
1281
1282	assert(malloc_initialized || IS_INITIALIZER);
1283
1284	if (config_ivsalloc)
1285		ret = ivsalloc(ptr, config_prof);
1286	else
1287		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
1288
1289	return (ret);
1290}
1291
1292JEMALLOC_ATTR(visibility("default"))
1293void
1294je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1295    const char *opts)
1296{
1297
1298	stats_print(write_cb, cbopaque, opts);
1299}
1300
1301JEMALLOC_ATTR(visibility("default"))
1302int
1303je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
1304    size_t newlen)
1305{
1306
1307	if (malloc_init())
1308		return (EAGAIN);
1309
1310	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1311}
1312
1313JEMALLOC_ATTR(visibility("default"))
1314int
1315je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
1316{
1317
1318	if (malloc_init())
1319		return (EAGAIN);
1320
1321	return (ctl_nametomib(name, mibp, miblenp));
1322}
1323
1324JEMALLOC_ATTR(visibility("default"))
1325int
1326je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1327  void *newp, size_t newlen)
1328{
1329
1330	if (malloc_init())
1331		return (EAGAIN);
1332
1333	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1334}
1335
1336/*
1337 * End non-standard functions.
1338 */
1339/******************************************************************************/
1340/*
1341 * Begin experimental functions.
1342 */
1343#ifdef JEMALLOC_EXPERIMENTAL
1344
1345JEMALLOC_INLINE void *
1346iallocm(size_t usize, size_t alignment, bool zero)
1347{
1348
1349	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1350	    alignment)));
1351
1352	if (alignment != 0)
1353		return (ipalloc(usize, alignment, zero));
1354	else if (zero)
1355		return (icalloc(usize));
1356	else
1357		return (imalloc(usize));
1358}
1359
1360JEMALLOC_ATTR(nonnull(1))
1361JEMALLOC_ATTR(visibility("default"))
1362int
1363je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
1364{
1365	void *p;
1366	size_t usize;
1367	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1368	    & (SIZE_T_MAX-1));
1369	bool zero = flags & ALLOCM_ZERO;
1370	prof_thr_cnt_t *cnt;
1371
1372	assert(ptr != NULL);
1373	assert(size != 0);
1374
1375	if (malloc_init())
1376		goto label_oom;
1377
1378	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1379	if (usize == 0)
1380		goto label_oom;
1381
1382	if (config_prof && opt_prof) {
1383		PROF_ALLOC_PREP(1, usize, cnt);
1384		if (cnt == NULL)
1385			goto label_oom;
1386		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1387		    SMALL_MAXCLASS) {
1388			size_t usize_promoted = (alignment == 0) ?
1389			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1390			    alignment);
1391			assert(usize_promoted != 0);
1392			p = iallocm(usize_promoted, alignment, zero);
1393			if (p == NULL)
1394				goto label_oom;
1395			arena_prof_promoted(p, usize);
1396		} else {
1397			p = iallocm(usize, alignment, zero);
1398			if (p == NULL)
1399				goto label_oom;
1400		}
1401		prof_malloc(p, usize, cnt);
1402	} else {
1403		p = iallocm(usize, alignment, zero);
1404		if (p == NULL)
1405			goto label_oom;
1406	}
1407	if (rsize != NULL)
1408		*rsize = usize;
1409
1410	*ptr = p;
1411	if (config_stats) {
1412		assert(usize == isalloc(p, config_prof));
1413		thread_allocated_tsd_get()->allocated += usize;
1414	}
1415	UTRACE(0, size, p);
1416	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
1417	return (ALLOCM_SUCCESS);
1418label_oom:
1419	if (config_xmalloc && opt_xmalloc) {
1420		malloc_write("<jemalloc>: Error in allocm(): "
1421		    "out of memory\n");
1422		abort();
1423	}
1424	*ptr = NULL;
1425	UTRACE(0, size, 0);
1426	return (ALLOCM_ERR_OOM);
1427}
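
/*
 * Example (experimental API, assuming the ALLOCM_* macros from the public
 * jemalloc.h): je_allocm(&p, &sz, 100, ALLOCM_LG_ALIGN(4) | ALLOCM_ZERO)
 * requests at least 100 zeroed bytes with 16-byte alignment and, on success,
 * stores the usable size (as computed by s2u()/sa2u()) in sz.
 */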
1428
1429JEMALLOC_ATTR(nonnull(1))
1430JEMALLOC_ATTR(visibility("default"))
1431int
1432je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
1433{
1434	void *p, *q;
1435	size_t usize;
1436	size_t old_size;
1437	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1438	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1439	    & (SIZE_T_MAX-1));
1440	bool zero = flags & ALLOCM_ZERO;
1441	bool no_move = flags & ALLOCM_NO_MOVE;
1442	prof_thr_cnt_t *cnt;
1443
1444	assert(ptr != NULL);
1445	assert(*ptr != NULL);
1446	assert(size != 0);
1447	assert(SIZE_T_MAX - size >= extra);
1448	assert(malloc_initialized || IS_INITIALIZER);
1449
1450	p = *ptr;
1451	if (config_prof && opt_prof) {
1452		/*
1453		 * usize isn't knowable before iralloc() returns when extra is
1454		 * non-zero.  Therefore, compute its maximum possible value and
1455		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
1456		 * backtrace.  prof_realloc() will use the actual usize to
1457		 * decide whether to sample.
1458		 */
1459		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1460		    sa2u(size+extra, alignment);
1461		prof_ctx_t *old_ctx = prof_ctx_get(p);
1462		old_size = isalloc(p, true);
1463		if (config_valgrind && opt_valgrind)
1464			old_rzsize = p2rz(p);
1465		PROF_ALLOC_PREP(1, max_usize, cnt);
1466		if (cnt == NULL)
1467			goto label_oom;
1468		/*
1469		 * Use minimum usize to determine whether promotion may happen.
1470		 */
1471		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1472		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1473		    <= SMALL_MAXCLASS) {
1474			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1475			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
1476			    alignment, zero, no_move);
1477			if (q == NULL)
1478				goto label_err;
1479			if (max_usize < PAGE) {
1480				usize = max_usize;
1481				arena_prof_promoted(q, usize);
1482			} else
1483				usize = isalloc(q, config_prof);
1484		} else {
1485			q = iralloc(p, size, extra, alignment, zero, no_move);
1486			if (q == NULL)
1487				goto label_err;
1488			usize = isalloc(q, config_prof);
1489		}
1490		prof_realloc(q, usize, cnt, old_size, old_ctx);
1491		if (rsize != NULL)
1492			*rsize = usize;
1493	} else {
1494		if (config_stats) {
1495			old_size = isalloc(p, false);
1496			if (config_valgrind && opt_valgrind)
1497				old_rzsize = u2rz(old_size);
1498		} else if (config_valgrind && opt_valgrind) {
1499			old_size = isalloc(p, false);
1500			old_rzsize = u2rz(old_size);
1501		}
1502		q = iralloc(p, size, extra, alignment, zero, no_move);
1503		if (q == NULL)
1504			goto label_err;
1505		if (config_stats)
1506			usize = isalloc(q, config_prof);
1507		if (rsize != NULL) {
1508			if (config_stats == false)
1509				usize = isalloc(q, config_prof);
1510			*rsize = usize;
1511		}
1512	}
1513
1514	*ptr = q;
1515	if (config_stats) {
1516		thread_allocated_t *ta;
1517		ta = thread_allocated_tsd_get();
1518		ta->allocated += usize;
1519		ta->deallocated += old_size;
1520	}
1521	UTRACE(p, size, q);
1522	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
1523	return (ALLOCM_SUCCESS);
1524label_err:
1525	if (no_move) {
1526		UTRACE(p, size, q);
1527		return (ALLOCM_ERR_NOT_MOVED);
1528	}
1529label_oom:
1530	if (config_xmalloc && opt_xmalloc) {
1531		malloc_write("<jemalloc>: Error in rallocm(): "
1532		    "out of memory\n");
1533		abort();
1534	}
1535	UTRACE(p, size, 0);
1536	return (ALLOCM_ERR_OOM);
1537}
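
/*
 * Example (experimental API): je_rallocm(&p, &sz, 8192, 0, ALLOCM_NO_MOVE)
 * attempts to resize *p in place; if the allocation would have to move, no
 * reallocation is performed and ALLOCM_ERR_NOT_MOVED is returned.
 */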
1538
1539JEMALLOC_ATTR(nonnull(1))
1540JEMALLOC_ATTR(visibility("default"))
1541int
1542je_sallocm(const void *ptr, size_t *rsize, int flags)
1543{
1544	size_t sz;
1545
1546	assert(malloc_initialized || IS_INITIALIZER);
1547
1548	if (config_ivsalloc)
1549		sz = ivsalloc(ptr, config_prof);
1550	else {
1551		assert(ptr != NULL);
1552		sz = isalloc(ptr, config_prof);
1553	}
1554	assert(rsize != NULL);
1555	*rsize = sz;
1556
1557	return (ALLOCM_SUCCESS);
1558}
1559
1560JEMALLOC_ATTR(nonnull(1))
1561JEMALLOC_ATTR(visibility("default"))
1562int
1563je_dallocm(void *ptr, int flags)
1564{
1565	size_t usize;
1566	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1567
1568	assert(ptr != NULL);
1569	assert(malloc_initialized || IS_INITIALIZER);
1570
1571	UTRACE(ptr, 0, 0);
1572	if (config_stats || config_valgrind)
1573		usize = isalloc(ptr, config_prof);
1574	if (config_prof && opt_prof) {
1575		if (config_stats == false && config_valgrind == false)
1576			usize = isalloc(ptr, config_prof);
1577		prof_free(ptr, usize);
1578	}
1579	if (config_stats)
1580		thread_allocated_tsd_get()->deallocated += usize;
1581	if (config_valgrind && opt_valgrind)
1582		rzsize = p2rz(ptr);
1583	iqalloc(ptr);
1584	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1585
1586	return (ALLOCM_SUCCESS);
1587}
1588
1589JEMALLOC_ATTR(visibility("default"))
1590int
1591je_nallocm(size_t *rsize, size_t size, int flags)
1592{
1593	size_t usize;
1594	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1595	    & (SIZE_T_MAX-1));
1596
1597	assert(size != 0);
1598
1599	if (malloc_init())
1600		return (ALLOCM_ERR_OOM);
1601
1602	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1603	if (usize == 0)
1604		return (ALLOCM_ERR_OOM);
1605
1606	if (rsize != NULL)
1607		*rsize = usize;
1608	return (ALLOCM_SUCCESS);
1609}
1610
1611#endif
1612/*
1613 * End experimental functions.
1614 */
1615/******************************************************************************/
1616/*
1617 * The following functions are used by threading libraries for protection of
1618 * malloc during fork().
1619 */
1620
1621#ifndef JEMALLOC_MUTEX_INIT_CB
1622void
1623jemalloc_prefork(void)
1624#else
1625JEMALLOC_ATTR(visibility("default"))
1626void
1627_malloc_prefork(void)
1628#endif
1629{
1630	unsigned i;
1631
1632	/* Acquire all mutexes in a safe order. */
1633	malloc_mutex_prefork(&arenas_lock);
1634	for (i = 0; i < narenas; i++) {
1635		if (arenas[i] != NULL)
1636			arena_prefork(arenas[i]);
1637	}
1638	base_prefork();
1639	huge_prefork();
1640	chunk_dss_prefork();
1641}
1642
1643#ifndef JEMALLOC_MUTEX_INIT_CB
1644void
1645jemalloc_postfork_parent(void)
1646#else
1647JEMALLOC_ATTR(visibility("default"))
1648void
1649_malloc_postfork(void)
1650#endif
1651{
1652	unsigned i;
1653
1654	/* Release all mutexes, now that fork() has completed. */
1655	chunk_dss_postfork_parent();
1656	huge_postfork_parent();
1657	base_postfork_parent();
1658	for (i = 0; i < narenas; i++) {
1659		if (arenas[i] != NULL)
1660			arena_postfork_parent(arenas[i]);
1661	}
1662	malloc_mutex_postfork_parent(&arenas_lock);
1663}
1664
1665void
1666jemalloc_postfork_child(void)
1667{
1668	unsigned i;
1669
1670	/* Release all mutexes, now that fork() has completed. */
1671	chunk_dss_postfork_child();
1672	huge_postfork_child();
1673	base_postfork_child();
1674	for (i = 0; i < narenas; i++) {
1675		if (arenas[i] != NULL)
1676			arena_postfork_child(arenas[i]);
1677	}
1678	malloc_mutex_postfork_child(&arenas_lock);
1679}
1680
1681/******************************************************************************/
1682/*
1683 * The following functions are used for TLS allocation/deallocation in static
1684 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
1685 * is that these avoid accessing TLS variables.
1686 */
1687
1688static void *
1689a0alloc(size_t size, bool zero)
1690{
1691
1692	if (malloc_init())
1693		return (NULL);
1694
1695	if (size == 0)
1696		size = 1;
1697
1698	if (size <= arena_maxclass)
1699		return (arena_malloc(arenas[0], size, zero, false));
1700	else
1701		return (huge_malloc(size, zero));
1702}
1703
1704void *
1705a0malloc(size_t size)
1706{
1707
1708	return (a0alloc(size, false));
1709}
1710
1711void *
1712a0calloc(size_t num, size_t size)
1713{
1714
1715	return (a0alloc(num * size, true));
1716}
1717
1718void
1719a0free(void *ptr)
1720{
1721	arena_chunk_t *chunk;
1722
1723	if (ptr == NULL)
1724		return;
1725
1726	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1727	if (chunk != ptr)
1728		arena_dalloc(chunk->arena, chunk, ptr, false);
1729	else
1730		huge_dalloc(ptr, true);
1731}
1732
1733/******************************************************************************/
1734