#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include "libc_private.h"
#include "namespace.h"

#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#include "un-namespace.h"
#include "libc_private.h"

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock = true;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
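
/*
 * The config_* constants above allow feature checks to be written as plain C
 * conditionals, e.g. "if (config_stats) { ... }"; when the corresponding
 * JEMALLOC_* macro is not defined, the constant is false and the compiler can
 * eliminate the guarded code as dead.
 */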

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
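/*
 * Each internal header cooperates with these passes by guarding each kind of
 * content, roughly along these lines (an illustrative sketch; see the
 * individual headers for the exact layout):
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   ...typedefs and constants...
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   ...struct definitions...
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   ...prototypes and extern data...
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...inline function definitions...
 *   #endif
 */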
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)
#define	QU(q)	((uint64_t)q)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif
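
/*
 * Illustrative uses: ZU(1) << LG_PAGE produces a size_t-typed shift without
 * depending on the type of the literal, and __DECONST(char *, s) strips the
 * qualifier from a (const char *) s by way of uintptr_t rather than a direct
 * cast.
 */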

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_ALWAYS_INLINE
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  ifdef JEMALLOC_HAVE_ATTR
#    define JEMALLOC_ALWAYS_INLINE \
	 static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
#  else
#    define JEMALLOC_ALWAYS_INLINE static inline
#  endif
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
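/*
 * For example, with LG_QUANTUM == 4 (a 16-byte quantum), QUANTUM_CEILING(1)
 * is 16, QUANTUM_CEILING(16) is 16, and QUANTUM_CEILING(17) is 32.
 */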

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
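/*
 * For example, with alignment == 16: ALIGNMENT_ADDR2BASE(0x1007, 16) is
 * 0x1000, ALIGNMENT_ADDR2OFFSET(0x1007, 16) is 7, and
 * ALIGNMENT_CEILING(23, 16) is 32.  All three macros assume that alignment
 * is a power of two.
 */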

/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
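
/*
 * Illustrative use (hypothetical locals): at the top of a function,
 * VARIABLE_ARRAY(uint64_t, counts, nbins); declares "counts" either as a C99
 * variable length array or as an alloca()ed buffer, depending on the
 * compiler.
 */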

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
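/*
 * For example, assuming the default 16-byte quantum, a malloc(100) request
 * has a usize of 112, and 112 (rather than 100) is what gets reported to
 * Valgrind, so writes to bytes 100..111 are not flagged as overflows.
 */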
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	RUNNING_ON_VALGRIND	((unsigned)0)
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define	VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
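/*
 * Without the wrapper, an invocation such as
 * malloc_tsd_funcs(..., thread_allocated, thread_allocated_t, {0, 0}, ...)
 * would be parsed by the preprocessor as two arguments, "{0" and "0}",
 * since braces, unlike parentheses, do not group macro arguments.
 */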

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, narenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
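/*
 * For example, assuming a 16-byte quantum and 4 KiB pages: s2u(100) returns
 * 112 (a small size class), s2u(5000) returns 8192 (two pages), and sizes
 * above arena_maxclass round up to a multiple of the chunk size.
 */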

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
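/*
 * For example, with a 16-byte quantum and 4 KiB pages: sa2u(100, 64) rounds
 * 100 up to 128 and returns 128 (a small class whose objects are 128-byte
 * aligned), while sa2u(100, 8192) takes the over-aligned path and returns
 * 4096.
 */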

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
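/*
 * Illustrative call: choose_arena(NULL) returns the calling thread's
 * assigned arena, assigning one via choose_arena_hard() the first time it is
 * called for that thread.
 */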
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imallocx(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icallocx(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idallocx(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqallocx(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imallocx(size, true, NULL));
}
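/*
 * imalloc() and the other un-suffixed wrappers below (icalloc(), ipalloc(),
 * idalloc(), iqalloc(), iralloc()) simply call the corresponding *x()
 * variant with tcache use enabled and the automatically chosen arena
 * (arena == NULL).
 */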

JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipallocx(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idallocx(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
	    NULL));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */