jemalloc_internal.h revision 234543
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include "libc_private.h"
#include "namespace.h"

#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#if !defined(SYS_write) && defined(__NR_write)
#define	SYS_write __NR_write
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#include "un-namespace.h"
#include "libc_private.h"

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

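/*
 * The config_* booleans below mirror the corresponding JEMALLOC_* cpp macros.
 * Expressing the settings as const booleans lets feature-conditional code be
 * written as ordinary C conditionals, e.g.
 *
 *	if (config_debug)
 *		...
 *
 * rather than #ifdef blocks; the compiler can still remove the dead branch
 * when the feature is disabled, while both branches remain visible for
 * parsing and type checking.
 */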
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 *
 * An illustrative sketch of a conforming component header follows.
 */
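/*
 * For illustration only (foo is a hypothetical component, not part of
 * jemalloc), such a header is laid out roughly as follows; each pass compiles
 * only the section matching the cpp variable that is currently defined:
 *
 *	#ifdef JEMALLOC_H_TYPES
 *	typedef struct foo_s foo_t;
 *	#define	FOO_QUUX	42
 *	#endif
 *	#ifdef JEMALLOC_H_STRUCTS
 *	struct foo_s {
 *		foo_t	*next;
 *	};
 *	#endif
 *	#ifdef JEMALLOC_H_EXTERNS
 *	bool	foo_boot(void);
 *	#endif
 *	#ifdef JEMALLOC_H_INLINES
 *	JEMALLOC_INLINE foo_t	*foo_first(void) { ... }
 *	#endif
 */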
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
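/*
 * For example (illustrative), with LG_QUANTUM == 4 (QUANTUM == 16):
 *	QUANTUM_CEILING(1)  == 16
 *	QUANTUM_CEILING(16) == 16
 *	QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 * The *_CEILING() macros below follow the same mask-and-round-up pattern.
 */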

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
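/*
 * Worked example (illustrative), with alignment == 64 (a power of two, as
 * these macros require):
 *	ALIGNMENT_ADDR2BASE(0x1234, 64)   == 0x1200
 *	ALIGNMENT_ADDR2OFFSET(0x1234, 64) == 0x34
 *	ALIGNMENT_CEILING(0x1234, 64)     == 0x1240
 * (-(alignment)) relies on alignment being a power of two, so that its
 * unsigned negation equals the mask ~(alignment - 1).
 */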

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument: without it, the comma inside the braced initializer would be
 * treated by the preprocessor as a macro argument separator.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
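/*
 * For example (illustrative), assuming a 4 KiB page size: s2u(5000) falls
 * through to PAGE_CEILING(5000) == 8192, since 5000 exceeds SMALL_MAXCLASS
 * but not arena_maxclass; a request larger than arena_maxclass would instead
 * be rounded up to a multiple of the chunk size via CHUNK_CEILING().
 */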

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10010000 |  16
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
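/*
 * Worked example (illustrative), again assuming 4 KiB pages: sa2u(5000, 8192)
 * takes the over-size-run path because the alignment exceeds PAGE.  usize is
 * rounded to PAGE_CEILING(5000) == 8192, and run_size == 8192 + 8192 - 4096
 * == 12288, which fits within a chunk for the default chunk size, so the
 * usable size returned is 8192.
 */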

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloc(void *ptr);
void	iqalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, false, true));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, true, true));
	else
		return (huge_malloc(size, true));
}

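/*
 * Allocate usize bytes with the specified alignment.  In rough terms (the
 * code below is authoritative): requests that fit in an arena and need no
 * more than page alignment go through arena_malloc(); arena-sized requests
 * with stricter alignment use arena_palloc(); huge requests use huge_malloc()
 * when chunk alignment already suffices, and huge_palloc() otherwise.
 */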
JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(NULL, usize, zero, true);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(NULL), usize, alignment,
			    zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, true);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloc(ptr);
}

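/*
 * Resize an existing allocation.  In outline: if ptr does not already satisfy
 * the requested alignment, a new object is allocated via ipalloc() and the
 * contents are copied; otherwise the request is forwarded to the arena or
 * huge reallocation paths, either strictly in place (no_move) or with
 * relocation permitted.
 */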
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, true));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */