#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas_total;
unsigned		narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

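/*
 * UTRACE() records a utrace(2) event describing an allocation request and its
 * result when the "utrace" option is enabled; errno is saved and restored
 * around the system call so tracing never perturbs the caller's errno.
 */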
#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned narenas, i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else {
		ret = (unsigned)result;
	}

	return (ret);
}

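/*
 * TSD cleanup hook for the arenas slot: when a thread exits, decrement the
 * thread count of the arena it was assigned to so that choose_arena_hard()
 * can balance future threads across arenas.
 */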
void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static JEMALLOC_ATTR(always_inline) void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();
}

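/*
 * Fast-path initialization check used at every public entry point: trigger
 * the one-time bootstrap via malloc_init_hard() if needed, then give
 * per-thread state a chance to initialize.  Returns true on failure.
 */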
static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{

	if (malloc_initialized == false && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

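/*
 * Parse one "key:value" pair from the options string at *opts_p, setting
 * *k_p/*klen_p and *v_p/*vlen_p to the key and value and advancing *opts_p
 * past the trailing comma, if any.  Returns true once the string is exhausted
 * or malformed.  For illustration, a conf string such as
 * "narenas:4,lg_chunk:22,junk:true" (option names taken from the handlers in
 * malloc_conf_init() below) yields three key/value pairs.
 */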
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && opt_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && opt_valgrind)
			opt_tcache = false;
	}

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
#ifndef _WIN32
			int linklen;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else
#endif
			{
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if (issetugid() == 0 && (opts = getenv(envname)) !=
			    NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
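			/*
			 * Each CONF_HANDLE_* macro below matches the named
			 * option key, parses the value into the corresponding
			 * opt_* variable (boolean, size_t, ssize_t, or
			 * string), reports invalid or out-of-range values via
			 * malloc_conf_error(), and continues with the next
			 * key/value pair.
			 */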
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (um < min)			\
						o = min;		\
					else if (um > max)		\
						o = max;		\
					else				\
						o = um;			\
				} else {				\
					if (um < min || um > max) {	\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = um;			\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_NOINLINE
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			assert(malloc_initialized || IS_INITIALIZER);
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

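/*
 * Control interface entry points.  As an illustrative sketch (not taken from
 * this file; assumes statistics support is compiled in), a caller might read
 * the total number of bytes currently allocated:
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		... use allocated ...
 *
 * All three entry points return EAGAIN if the allocator cannot be
 * initialized.
 */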
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
  void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

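/*
 * Common allocation helper for the experimental *allocm() entry points:
 * dispatch to the aligned, zeroed, or plain internal allocator depending on
 * the request, optionally bypassing the thread cache and/or targeting an
 * explicit arena.
 */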
1410245868Sjasonestatic JEMALLOC_ATTR(always_inline) void *
1411242844Sjasoneiallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
1412242844Sjasone    arena_t *arena)
1413234370Sjasone{
1414234370Sjasone
1415234370Sjasone	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1416234370Sjasone	    alignment)));
1417234370Sjasone
1418234370Sjasone	if (alignment != 0)
1419242844Sjasone		return (ipallocx(usize, alignment, zero, try_tcache, arena));
1420234370Sjasone	else if (zero)
1421242844Sjasone		return (icallocx(usize, try_tcache, arena));
1422234370Sjasone	else
1423242844Sjasone		return (imallocx(usize, try_tcache, arena));
1424234370Sjasone}

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;
	bool try_tcache;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	if (arena_ind != UINT_MAX) {
		arena = arenas[arena_ind];
		try_tcache = false;
	} else {
		arena = NULL;
		try_tcache = true;
	}

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero,
			    try_tcache, arena);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero, try_tcache, arena);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero, try_tcache, arena);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
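
/*
 * Editorial example (not part of the upstream source): a hedged sketch of the
 * experimental allocm() interface, requesting 4 KiB alignment and zeroed
 * memory; ALLOCM_ALIGN() and ALLOCM_ZERO come from the public experimental
 * header:
 *
 *	void *p;
 *	size_t rsize;
 *	if (je_allocm(&p, &rsize, 1000, ALLOCM_ALIGN(4096) | ALLOCM_ZERO) !=
 *	    ALLOCM_SUCCESS)
 *		- allocation failed; p was set to NULL
 */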

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;
	arena_t *arena;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = true;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
		try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
		    arenas[arena_ind]);
		arena = arenas[arena_ind];
	} else {
		try_tcache_alloc = true;
		try_tcache_dalloc = true;
		arena = NULL;
	}

	p = *ptr;
	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move, try_tcache_alloc,
			    try_tcache_dalloc, arena);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = irallocx(p, size, extra, alignment, zero, no_move,
			    try_tcache_alloc, try_tcache_dalloc, arena);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = irallocx(p, size, extra, alignment, zero, no_move,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
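
/*
 * Editorial example (not part of the upstream source): a hedged sketch of
 * attempting an in-place resize with ALLOCM_NO_MOVE before falling back to a
 * moving reallocation:
 *
 *	if (je_rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED) {
 *		- p is unchanged; retry without ALLOCM_NO_MOVE (or keep the
 *		- old object) if the caller can tolerate p moving
 *	}
 */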

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
	} else
		try_tcache = true;

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqallocx(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
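
/*
 * Editorial example (not part of the upstream source): nallocm() reports the
 * usable size that an allocm() call with the same size/flags would produce,
 * without performing an allocation.  A hedged sketch:
 *
 *	size_t usize;
 *	if (je_nallocm(&usize, 1000, ALLOCM_ALIGN(64)) == ALLOCM_SUCCESS)
 *		- usize equals what je_allocm() would report via *rsize
 */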

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */
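
/*
 * Editorial note (not part of the upstream source): per the comment below, on
 * configurations where jemalloc registers these handlers itself, the
 * registration during initialization looks roughly like:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * On FreeBSD the C library instead invokes the _malloc_prefork() and
 * _malloc_postfork() aliases around fork(2).
 */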

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be
 * possible to trigger the deadlock described above, but doing so would
 * involve forking via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}
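
/*
 * Editorial illustration (not part of the upstream source) of the residual
 * race described above, assuming the allocator has not finished initializing
 * when fork(2) runs:
 *
 *	main thread			second thread
 *	-----------			-------------
 *	pthread_create()
 *					malloc() -> malloc_init() begins;
 *					    init_lock acquired
 *	fork()
 *	  child inherits init_lock in
 *	  its locked state
 *	  malloc() -> blocks forever on
 *	      init_lock (deadlock)
 */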

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
	huge_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}

void
_malloc_first_thread(void)
{

	(void)malloc_mutex_first_thread();
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}
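
/*
 * Editorial example (not part of the upstream source): a hedged sketch of how
 * a TLS-bootstrapping consumer might use these wrappers; the names below are
 * purely illustrative, and the real callers live outside this file:
 *
 *	void *tls_block = a0malloc(tls_block_size);
 *	...
 *	a0free(tls_block);
 */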

/******************************************************************************/