#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

/* Human-readable names for dss_prec_t values, indexed by enum value. */
const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;
30234370Sjasone
#ifndef JEMALLOC_HAVE_SBRK
/*
 * Stub for platforms that lack sbrk().  Calls not_implemented() (project
 * macro) and returns NULL; DSS allocation is unusable on such platforms.
 */
static void *
sbrk(intptr_t increment)
{

	not_implemented();

	return (NULL);
}
#endif
41234370Sjasone
42242844Sjasonedss_prec_t
43242844Sjasonechunk_dss_prec_get(void)
44242844Sjasone{
45242844Sjasone	dss_prec_t ret;
46242844Sjasone
47242844Sjasone	if (config_dss == false)
48242844Sjasone		return (dss_prec_disabled);
49242844Sjasone	malloc_mutex_lock(&dss_mtx);
50242844Sjasone	ret = dss_prec_default;
51242844Sjasone	malloc_mutex_unlock(&dss_mtx);
52242844Sjasone	return (ret);
53242844Sjasone}
54242844Sjasone
55242844Sjasonebool
56242844Sjasonechunk_dss_prec_set(dss_prec_t dss_prec)
57242844Sjasone{
58242844Sjasone
59242844Sjasone	if (config_dss == false)
60242844Sjasone		return (true);
61242844Sjasone	malloc_mutex_lock(&dss_mtx);
62242844Sjasone	dss_prec_default = dss_prec;
63242844Sjasone	malloc_mutex_unlock(&dss_mtx);
64242844Sjasone	return (false);
65242844Sjasone}
66242844Sjasone
/*
 * Allocate a chunk-aligned region of |size| bytes (a multiple of chunksize)
 * from the DSS via sbrk(), aligned to |alignment|.  On success the result is
 * zeroed when *zero is set, and any chunk-aligned pad created to satisfy the
 * alignment is recycled via chunk_unmap().  Returns NULL if the DSS is
 * exhausted or the request cannot be represented as a positive sbrk()
 * increment.
 */
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(config_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			/* Reject requests whose end would wrap the address space. */
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			/*
			 * sbrk() returns the previous break on success; it
			 * equals dss_max only if no other thread moved the
			 * break between the sbrk(0) probe and this call.
			 */
			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				/* Return the alignment pad to the chunk recycler. */
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					/* Tell Valgrind the region is writable before zeroing. */
					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
			/* Retry unless sbrk() reported exhaustion ((void *)-1). */
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
139234370Sjasone
140234370Sjasonebool
141234370Sjasonechunk_in_dss(void *chunk)
142234370Sjasone{
143234370Sjasone	bool ret;
144234370Sjasone
145234370Sjasone	cassert(config_dss);
146234370Sjasone
147234370Sjasone	malloc_mutex_lock(&dss_mtx);
148234370Sjasone	if ((uintptr_t)chunk >= (uintptr_t)dss_base
149234370Sjasone	    && (uintptr_t)chunk < (uintptr_t)dss_max)
150234370Sjasone		ret = true;
151234370Sjasone	else
152234370Sjasone		ret = false;
153234370Sjasone	malloc_mutex_unlock(&dss_mtx);
154234370Sjasone
155234370Sjasone	return (ret);
156234370Sjasone}
157234370Sjasone
158234370Sjasonebool
159234370Sjasonechunk_dss_boot(void)
160234370Sjasone{
161234370Sjasone
162234370Sjasone	cassert(config_dss);
163234370Sjasone
164234370Sjasone	if (malloc_mutex_init(&dss_mtx))
165234370Sjasone		return (true);
166234370Sjasone	dss_base = sbrk(0);
167234370Sjasone	dss_prev = dss_base;
168234370Sjasone	dss_max = dss_base;
169234370Sjasone
170234370Sjasone	return (false);
171234370Sjasone}
172234370Sjasone
173234370Sjasonevoid
174234370Sjasonechunk_dss_prefork(void)
175234370Sjasone{
176234370Sjasone
177234370Sjasone	if (config_dss)
178234370Sjasone		malloc_mutex_prefork(&dss_mtx);
179234370Sjasone}
180234370Sjasone
181234370Sjasonevoid
182234370Sjasonechunk_dss_postfork_parent(void)
183234370Sjasone{
184234370Sjasone
185234370Sjasone	if (config_dss)
186234370Sjasone		malloc_mutex_postfork_parent(&dss_mtx);
187234370Sjasone}
188234370Sjasone
189234370Sjasonevoid
190234370Sjasonechunk_dss_postfork_child(void)
191234370Sjasone{
192234370Sjasone
193234370Sjasone	if (config_dss)
194234370Sjasone		malloc_mutex_postfork_child(&dss_mtx);
195234370Sjasone}
196234370Sjasone
197234370Sjasone/******************************************************************************/
198