jemalloc_internal.h revision 234370
#include "libc_private.h"
#include "namespace.h"

#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#if !defined(SYS_write) && defined(__NR_write)
#define	SYS_write __NR_write
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <math.h>

#include "un-namespace.h"
#include "libc_private.h"

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
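
/*
 * Design note: the configuration is surfaced as compile-time-constant
 * booleans rather than left as bare #ifdefs so that feature-conditional code
 * (e.g. "if (config_debug) { ... }") is always parsed and type-checked,
 * while the optimizer discards the dead branch.
 */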

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
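/*
 * Illustrative sketch of the convention each internal header follows (the
 * arena_s lines are hypothetical shorthand): every section is guarded by one
 * of the pass variables, so including the header once per pass pulls in only
 * the matching section.
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct arena_s arena_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct arena_s { ... };
 *   #endif
 *   ...
 */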
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
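
/*
 * Worked example (illustrative): with LG_QUANTUM == 4, QUANTUM == 16 and
 * QUANTUM_MASK == 0xf, so QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32 and
 * QUANTUM_CEILING(32) == 32.
 */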

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
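
/*
 * Worked example (illustrative): for alignment == 64, -(alignment) equals
 * ~(size_t)63 in unsigned arithmetic, so for a == 0x1234:
 *   ALIGNMENT_ADDR2BASE(a, 64)    == 0x1200
 *   ALIGNMENT_ADDR2OFFSET(a, 64)  == 0x34
 *   ALIGNMENT_CEILING(0x1234, 64) == 0x1240
 */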

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
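
/*
 * Illustrative usage (hypothetical call site; assumes usize was computed via
 * s2u()/sa2u() so that the size reported to Valgrind stays consistent across
 * reallocations):
 *
 *   ret = imalloc(usize);
 *   JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 */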

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument; without it, the preprocessor would split the braced initializer
 * at its comma into two separate macro arguments.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
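
/*
 * Worked example (illustrative, assuming 4 KiB pages and that PAGE + 1 falls
 * in the large range, i.e. SMALL_MAXCLASS < PAGE + 1 <= arena_maxclass):
 * s2u(PAGE + 1) takes the large path and returns
 * PAGE_CEILING(PAGE + 1) == 2 * PAGE.
 */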

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10010000 |  16
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
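
/*
 * Worked example (illustrative, assuming 128 is a small size class whose
 * reg_size is 128): sa2u(100, 64) rounds the size up to
 * ALIGNMENT_CEILING(100, 64) == 128; since 128 <= SMALL_MAXCLASS and
 * 64 <= PAGE, the result is 128, and every 128-byte region is 128-byte
 * aligned, which satisfies the requested alignment of 64.
 */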

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloc(void *ptr);
void	iqalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, false, true));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, true, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(NULL, usize, zero, true);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(NULL), usize, alignment,
			    zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		ret = arena_salloc(ptr, demote);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, true);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloc(ptr);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, true));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
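
/*
 * Illustrative (hypothetical) call: a realloc-style caller that allows the
 * object to move, requests no extra trailing space, and imposes no alignment
 * requirement:
 *
 *   void *q = iralloc(p, new_size, 0, 0, false, false);
 */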

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
