#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include "libc_private.h"
#include "namespace.h"

#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#include "un-namespace.h"
#include "libc_private.h"

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
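
/*
 * These config_* constants are meant to be tested with ordinary C
 * conditionals rather than #ifdef, so that every branch is type-checked
 * in all configurations while the optimizer removes the dead ones.
 * Illustrative use (a sketch, not verbatim code from this codebase):
 *
 *   if (config_stats)
 *	thread_allocated_tsd_get()->allocated += usize;
 */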

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
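/*
 * Sketch of the per-component layout (a hypothetical header; the names are
 * illustrative only):
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s {
 *	...
 *   };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   void	foo_boot(void);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...static inline definitions...
 *   #endif
 */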
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)
#define	QU(q)	((uint64_t)q)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_ALWAYS_INLINE
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  ifdef JEMALLOC_HAVE_ATTR
#    define JEMALLOC_ALWAYS_INLINE \
	 static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
#  else
#    define JEMALLOC_ALWAYS_INLINE static inline
#  endif
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif
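
/*
 * The resulting usage pattern (illustrative; the headers included below
 * follow the same shape): prototypes appear only when inlining is disabled,
 * and definitions are emitted either inline everywhere or exactly once in
 * jemalloc.c, which defines JEMALLOC_C_:
 *
 *   #ifndef JEMALLOC_ENABLE_INLINE
 *   int	foo(void);
 *   #endif
 *
 *   #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 *   JEMALLOC_INLINE int
 *   foo(void)
 *   {
 *
 *	return (0);
 *   }
 *   #endif
 */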

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
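
/*
 * Example (assuming LG_QUANTUM == 4, i.e. QUANTUM == 16):
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 */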

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
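
/*
 * Example (alignment must be a power of two; the address is illustrative):
 *   ALIGNMENT_ADDR2BASE((void *)0x1234, 0x100)   == (void *)0x1200
 *   ALIGNMENT_ADDR2OFFSET((void *)0x1234, 0x100) == 0x34
 *   ALIGNMENT_CEILING(0x1234, 0x100)             == 0x1300
 */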

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
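
/*
 * Typical usage (illustrative): declare a stack buffer of nelms elements,
 * as a C99 variable-length array where available, else via alloca():
 *
 *   VARIABLE_ARRAY(uint8_t, buf, nelms);
 */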

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
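/*
 * Illustrative call sequence (a sketch, not verbatim jemalloc.c code),
 * showing that usize is captured once and reported consistently, and that
 * the redzone size is read before the block is freed:
 *
 *   p = imalloc(size);
 *   usize = isalloc(p, config_prof);
 *   JEMALLOC_VALGRIND_MALLOC(p != NULL, p, usize, false);
 *   ...
 *   rzsize = p2rz(p);
 *   iqalloc(p);
 *   JEMALLOC_VALGRIND_FREE(p, rzsize);
 */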
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	RUNNING_ON_VALGRIND	((unsigned)0)
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define	VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
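/*
 * Illustrative failure without the wrapper (WRAP is a hypothetical
 * one-argument macro): braces, unlike parentheses, do not protect commas,
 * so the initializer would be split into two macro arguments:
 *
 *   #define WRAP(x) x
 *   WRAP({0, 0})	// error: macro "WRAP" passed 2 arguments
 */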

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
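
/*
 * Example (assuming 4 KiB pages, LG_QUANTUM == 4, and the default size
 * classes):
 *   s2u(100)  == 112	(rounded up to a small size class)
 *   s2u(5000) == 8192	(rounded up to a page multiple)
 */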

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
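
/*
 * Example (same assumptions as for s2u() above):
 *   sa2u(100, 64)   == 128	(smallest size class naturally aligned to 64)
 *   sa2u(100, 4096) == 4096	(page alignment forces a one-page allocation)
 */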

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imallocx(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icallocx(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idallocx(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqallocx(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipallocx(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idallocx(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
	    NULL));
}
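
/*
 * Typical usage (illustrative):
 *   q = iralloc(p, size, 0, 0, false, false);
 *   if (q == NULL)
 *	... allocation failed; p was left intact ...
 */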

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */