#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

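/*
 * Allocate a chunk-aligned huge object; equivalent to huge_palloc() with the
 * minimum (chunksize) alignment.
 */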
void *
huge_malloc(size_t size, bool zero)
{

	return (huge_palloc(size, chunksize, zero));
}

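/*
 * Allocate a huge object of at least size bytes, rounded up to a multiple of
 * the chunk size and aligned to at least alignment bytes.
 */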
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
	    chunk_dss_prec_get());
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

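/*
 * Try to satisfy a huge reallocation request in place.  Returns ptr on
 * success, or NULL if the request can only be satisfied by moving the
 * allocation.
 */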
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

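/*
 * Reallocate a huge object, falling back to allocate-copy-free (or mremap(2)
 * where available) when the request cannot be satisfied in place.
 */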
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
	}
	return (ret);
}

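/*
 * Remove a huge allocation from the tree and release it.  When unmap is false
 * (as in the mremap(2) path above), the underlying mapping is left in place.
 */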
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

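/* Return the size of the huge allocation pointed to by ptr. */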
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

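/* Return the profiling context associated with a huge allocation. */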
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

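/* Set the profiling context associated with a huge allocation. */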
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

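/*
 * Initialize the huge allocation subsystem.  Returns true on error, false on
 * success.
 */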
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

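/*
 * Fork handlers: huge_mtx is acquired before fork(2) and released (or
 * reinitialized in the child) afterward, so the child inherits a consistent
 * mutex state.
 */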
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}