#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

size_t			nhbins;
size_t			tcache_maxclass;

/******************************************************************************/

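/* Return the usable size of the allocation that ptr refers to. */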
size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

void
tcache_event_hard(tcache_t *tcache)
{
	size_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		}
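		/*
		 * Illustrative arithmetic (hypothetical values): with
		 * ncached == 20 and low_water == 8, the "rem" argument is
		 * 20 - 8 + (8 >> 2) == 14, so 6 objects -- ceil(3/4 * 8) --
		 * are flushed and 14 remain cached.
		 */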
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
	void *ret;

	arena_tcache_fill_small(tcache->arena, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

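	/*
	 * Flush objects until only rem remain.  Each pass locks the arena bin
	 * that owns the first queued object, frees every queued object owned
	 * by that arena, and stashes the rest at the front of avail[] so that
	 * a later pass (under a different bin lock) can free them.
	 */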
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

		if (config_prof && arena == tcache->arena) {
			if (arena_prof_accum(arena, tcache->prof_accumbytes))
				prof_idump();
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && arena == tcache->arena) {
			assert(merged_stats == false);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_t *mapelm =
				    arena_mapp_get(chunk, pageind);
				if (config_fill && opt_junk) {
					arena_alloc_junk_small(ptr,
					    &arena_bin_info[binind], true);
				}
				arena_dalloc_bin_locked(arena, chunk, ptr,
				    mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &tcache->arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

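	/*
	 * Same pass structure as tcache_bin_flush_small(), except that large
	 * objects are freed under the owning arena's lock rather than a bin
	 * lock.
	 */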
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		UNUSED bool idump;

		if (config_prof)
			idump = false;
		malloc_mutex_lock(&arena->lock);
		if ((config_prof || config_stats) && arena == tcache->arena) {
			if (config_prof) {
				idump = arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena)
				arena_dalloc_large_locked(arena, chunk, ptr);
			else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&arena->lock);
		if (config_prof && idump)
			prof_idump();
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_t *arena = tcache->arena;
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
	tcache->arena = arena;
}

void
tcache_arena_dissociate(tcache_t *tcache)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&tcache->arena->lock);
		ql_remove(&tcache->arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&tcache->arena->lock);
		tcache_stats_merge(tcache, tcache->arena);
	}
}

tcache_t *
tcache_create(arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/*
	 * Round up to the nearest multiple of the cacheline size, in order to
	 * avoid the possibility of false cacheline sharing.
	 *
	 * That this works relies on the same logic as in ipalloc(), but we
	 * cannot directly call ipalloc() here due to tcache bootstrapping
	 * issues.
	 */
	size = (size + CACHELINE_MASK) & (-CACHELINE);
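	/*
	 * Since -CACHELINE == ~CACHELINE_MASK in two's complement, this is
	 * ordinary round-up-to-multiple arithmetic; e.g. with CACHELINE ==
	 * 64, a size of 100 becomes (100 + 63) & ~63 == 128 (hypothetical
	 * numbers).
	 */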

	if (size <= SMALL_MAXCLASS)
		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icallocx(size, false, arena);

	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}

void
tcache_destroy(tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;

	tcache_arena_dissociate(tcache);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
		prof_idump();

	tcache_size = arena_salloc(tcache, false);
	if (tcache_size <= SMALL_MAXCLASS) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;
		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
		    LG_PAGE;
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
	} else if (tcache_size <= tcache_maxclass) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idallocx(tcache, false);
}

void
tcache_thread_cleanup(void *arg)
{
	tcache_t *tcache = *(tcache_t **)arg;

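	/*
	 * The TSD value doubles as a small state machine: NULL (tcache never
	 * created), a live tcache pointer, or one of the sentinels
	 * TCACHE_STATE_DISABLED, TCACHE_STATE_REINCARNATED, and
	 * TCACHE_STATE_PURGATORY that track destructor progress across
	 * thread-exit callbacks.
	 */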
	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	}
}

void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

bool
tcache_boot0(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

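	/*
	 * One cache bin per small size class, plus one per page-multiple
	 * large size class up to tcache_maxclass.
	 */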
	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}

bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}