/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>
#include <machine/smp.h>

#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <dev/random/fenestrasX/fx_brng.h>
#include <dev/random/fenestrasX/fx_hash.h>
#include <dev/random/fenestrasX/fx_pool.h>
#include <dev/random/fenestrasX/fx_priv.h>
#include <dev/random/fenestrasX/fx_pub.h>

/*
 * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2)
 */
#define	FXENT_RESSED_INTVL_GFACT	3
#define	FXENT_RESEED_INTVL_MAX		3600
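
/*
 * Worked example (see fxent_timer_reseed below): the reseed interval starts
 * at 1 second and is multiplied by the growth factor on every timer reseed,
 * i.e., 1s, 3s, 9s, 27s, ..., 2187s, and then clamps at the 3600s maximum.
 */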

/*
 * Pool reseed schedule.  Initially, only pool 0 is active.  Until the timer
 * interval reaches INTVL_MAX, only pool 0 is used.
 *
 * After reaching INTVL_MAX, pool k is either activated (if inactive) or used
 * (if active) every 3^k timer reseeds.  (§ 3.3)
 *
 * (Entropy harvesting only round robins across active pools.)
 */
#define	FXENT_RESEED_BASE		3
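
/*
 * For example, once the interval has reached its maximum: reseed numbers 1
 * and 2 draw from pool 0 only; reseed 3 (divisible by 3^1) activates or uses
 * pool 1; reseed 9 (divisible by 3^2) brings in pool 2 as well, and so on, so
 * pool k only contributes every 3^k-th timer reseed.
 */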

/*
 * Number of bytes from high quality sources to allocate to pool 0 before
 * normal round-robin allocation after each timer reseed. (§ 3.4)
 */
#define	FXENT_HI_SRC_POOL0_BYTES	32

/*
 * § 3.1
 *
 * Low sources provide unconditioned entropy, such as mouse movements; high
 * sources are assumed to provide high-quality random bytes.  Pull sources are
 * those which can be polled, i.e., anything randomdev calls a "random_source."
 *
 * In the whitepaper, low sources are pull.  For us, at least in the existing
 * design, low-quality sources push into some global ring buffer and then get
 * forwarded into the RNG by a thread that continually polls.  Presumably their
 * design batches low entropy signals in some way (SHA512?) and only requests
 * them dynamically on reseed.  I'm not sure what the benefit is vs feeding
 * into the pools directly.
 */
enum fxrng_ent_access_cls {
	FXRNG_PUSH,
	FXRNG_PULL,
};
enum fxrng_ent_source_cls {
	FXRNG_HI,
	FXRNG_LO,
	FXRNG_GARBAGE,
};
struct fxrng_ent_cls {
	enum fxrng_ent_access_cls	entc_axx_cls;
	enum fxrng_ent_source_cls	entc_src_cls;
};

static const struct fxrng_ent_cls fxrng_hi_pull = {
	.entc_axx_cls = FXRNG_PULL,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_hi_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_lo_push = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_LO,
};
static const struct fxrng_ent_cls fxrng_garbage = {
	.entc_axx_cls = FXRNG_PUSH,
	.entc_src_cls = FXRNG_GARBAGE,
};

/*
 * This table is a mapping of randomdev's current source abstractions to the
 * designations above; at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 */
static const struct fxrng_ent_char {
	const struct fxrng_ent_cls	*entc_cls;
} fxrng_ent_char[ENTROPYSOURCE] = {
	[RANDOM_CACHED] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_ATTACH] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_KEYBOARD] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_MOUSE] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_TUN] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_ETHER] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_NET_NG] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_INTERRUPT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_SWI] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_FS_ATIME] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_UMA] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_CALLOUT] = {
		.entc_cls = &fxrng_lo_push,
	},
	[RANDOM_PURE_OCTEON] = {
		.entc_cls = &fxrng_hi_push,	/* Could be made pull. */
	},
	[RANDOM_PURE_SAFE] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_GLXSB] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_HIFN] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_RDRAND] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_NEHEMIAH] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_RNDTEST] = {
		.entc_cls = &fxrng_garbage,
	},
	[RANDOM_PURE_VIRTIO] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_BROADCOM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_CCP] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_DARN] = {
		.entc_cls = &fxrng_hi_pull,
	},
	[RANDOM_PURE_TPM] = {
		.entc_cls = &fxrng_hi_push,
	},
	[RANDOM_PURE_VMGENID] = {
		.entc_cls = &fxrng_hi_push,
	},
};

/* Useful for single-bit-per-source state. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);

/* XXX Borrowed from not-yet-committed D22702. */
#ifndef BIT_TEST_SET_ATOMIC_ACQ
#define	BIT_TEST_SET_ATOMIC_ACQ(_s, n, p)	\
	(atomic_testandset_acq_long(		\
	    &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
#endif
#define	FXENT_TEST_SET_ATOMIC_ACQ(n, p) \
	BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)
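
/*
 * FXENT_TEST_SET_ATOMIC_ACQ(n, p) atomically sets bit 'n' in bitset 'p' with
 * acquire semantics and evaluates to true only if the bit was already set;
 * fxrng_event_processor below uses the false (not-yet-set) result to detect
 * the first event from a given entropy source.
 */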

/* For special behavior on first-time entropy sources. (§ 3.1) */
static struct fxrng_bits __read_mostly fxrng_seen;

/* For special behavior for high-entropy sources after a reseed. (§ 3.4) */
_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];

/* Entropy pools.  Lock order is ENT -> RNG(root) -> RNG(leaf). */
static struct mtx fxent_pool_lk;
MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);
#define	FXENT_LOCK()		mtx_lock(&fxent_pool_lk)
#define	FXENT_UNLOCK()		mtx_unlock(&fxent_pool_lk)
#define	FXENT_ASSERT(rng)	mtx_assert(&fxent_pool_lk, MA_OWNED)
#define	FXENT_ASSERT_NOT(rng)	mtx_assert(&fxent_pool_lk, MA_NOTOWNED)
static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
static unsigned __read_mostly fxent_nactpools = 1;
static struct timeout_task fxent_reseed_timer;
static int __read_mostly fxent_timer_ready;

/*
 * Track number of bytes of entropy harvested from high-quality sources prior
 * to initial keying.  The idea is to collect more jitter entropy when fewer
 * high-quality bytes were available and less if we had other good sources.  We
 * want to provide always-on availability but don't necessarily have *any*
 * great sources on some platforms.
 *
 * Like fxrng_ent_char: at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 *
 * Jitter entropy is unimplemented for now.
 */
static unsigned long fxrng_preseed_ent;

void
fxrng_pools_init(void)
{
	size_t i;

	for (i = 0; i < nitems(fxent_pool); i++)
		fxrng_hash_init(&fxent_pool[i]);
}

static inline bool
fxrng_hi_source(enum random_entropy_source src)
{
	return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
}

/*
 * A racy check that this high-entropy source's event should contribute to
 * pool0 on the basis of per-source byte count.  The check is racy for two
 * reasons:
 *   - Performance: The vast majority of the time, we've already taken 32 bytes
 *     from any present high quality source and the racy check lets us avoid
 *     dirtying the cache for the global array.
 *   - Correctness: It's fine that the check is racy.  The failure modes are:
 *     • False positive: We will detect when we take the lock.
 *     • False negative: We still collect the entropy; it just won't be
 *       preferentially placed in pool0 in this case.
 */
static inline bool
fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
{
	return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
	    FXENT_HI_SRC_POOL0_BYTES);
}

/*
 * Top level entropy processing API from randomdev.
 *
 * Invoked by the core randomdev subsystem both for preload entropy, "push"
 * sources (like interrupts, keyboard, etc) and pull sources (RDRAND, etc).
 */
void
fxrng_event_processor(struct harvest_event *event)
{
	enum random_entropy_source src;
	unsigned pool;
	bool first_time, first_32;

	src = event->he_source;

	ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
	    "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
	    (unsigned)event->he_size, sizeof(event->he_entropy));

	/*
	 * Zero bytes of source entropy doesn't count as observing this source
	 * for the first time.  We still harvest the counter entropy.
	 */
	first_time = event->he_size > 0 &&
	    !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
	if (__predict_false(first_time)) {
		/*
		 * "The first time [any source] provides entropy, it is used to
		 * directly reseed the root PRNG.  The entropy pools are
		 * bypassed." (§ 3.1)
		 *
		 * Unlike Windows, we cannot rely on loader(8) seed material
		 * being present, so we perform initial keying in the kernel.
		 * We use brng_generation 0 to represent an unkeyed state.
		 *
		 * Prior to initial keying, it doesn't make sense to try to mix
		 * the entropy directly with the root PRNG state, as the root
		 * PRNG is unkeyed.  Instead, we collect pre-keying dynamic
		 * entropy in pool0 and do not bump the root PRNG seed version
		 * or set its key.  Initial keying will incorporate pool0 and
		 * bump the brng_generation (seed version).
		 *
		 * After initial keying, we do directly mix in first-time
		 * entropy sources.  We use the root BRNG to generate 32 bytes
		 * and use fxrng_hash to mix it with the new entropy source and
		 * re-key with the first 256 bits of hash output.
		 */
		FXENT_LOCK();
		FXRNG_BRNG_LOCK(&fxrng_root);
		if (__predict_true(fxrng_root.brng_generation > 0)) {
			/* Bypass the pools: */
			FXENT_UNLOCK();
			fxrng_brng_src_reseed(event);
			FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
			return;
		}

		/*
		 * Keying the root PRNG requires both FXENT_LOCK and the PRNG's
		 * lock, so we only need to hold on to the pool lock to prevent
		 * initial keying without this entropy.
		 */
		FXRNG_BRNG_UNLOCK(&fxrng_root);

		/* Root PRNG hasn't been keyed yet, just accumulate event. */
		fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
		    sizeof(event->he_somecounter));
		fxrng_hash_update(&fxent_pool[0], event->he_entropy,
		    event->he_size);

		if (fxrng_hi_source(src)) {
			/* Prevent overflow. */
			if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
				fxrng_preseed_ent += event->he_size;
		}
		FXENT_UNLOCK();
		return;
	}
	/* !first_time */

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * The first-32-byte tracking data in fxrng_reseed_seen is reset in
	 * fxent_timer_reseed_npools() below.
	 */
	first_32 = event->he_size > 0 &&
	    fxrng_hi_source(src) &&
	    atomic_load_acq_int(&fxent_nactpools) > 1 &&
	    fxrng_hi_pool0_eligible_racy(src);
	if (__predict_false(first_32)) {
		unsigned rem, seen;

		FXENT_LOCK();
		seen = fxrng_reseed_seen[src];
		if (seen == FXENT_HI_SRC_POOL0_BYTES)
			goto round_robin;

		rem = FXENT_HI_SRC_POOL0_BYTES - seen;
		rem = MIN(rem, event->he_size);

		fxrng_reseed_seen[src] = seen + rem;

		/*
		 * We put 'rem' bytes in pool0, and any remaining bytes are
		 * round-robin'd across other pools.
		 */
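		/*
		 * For example, if 28 bytes from this source have already been
		 * counted since the last timer reseed and this event carries 8
		 * bytes, rem is 4: the last 4 bytes of the event go to pool0
		 * here, and the remaining 4 (plus the timecounter) fall
		 * through to the round-robin path below.
		 */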
		fxrng_hash_update(&fxent_pool[0],
		    ((uint8_t *)event->he_entropy) + event->he_size - rem,
		    rem);
		if (rem == event->he_size) {
			fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
			    sizeof(event->he_somecounter));
			FXENT_UNLOCK();
			return;
		}

		/*
		 * If fewer bytes were needed than this event provided, we only
		 * take the last 'rem' bytes of the entropy buffer and leave
		 * the timecounter to be round-robin'd with the remaining
		 * entropy.
		 */
		event->he_size -= rem;
		goto round_robin;
	}
	/* !first_32 */

	FXENT_LOCK();

round_robin:
	FXENT_ASSERT();
	pool = event->he_destination % fxent_nactpools;
	fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
	    event->he_size);
	fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
	    sizeof(event->he_somecounter));

	if (__predict_false(fxrng_hi_source(src) &&
	    atomic_load_acq_64(&fxrng_root_generation) == 0)) {
		/* Prevent overflow. */
		if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
			fxrng_preseed_ent += event->he_size;
	}
	FXENT_UNLOCK();
}

/*
 * Top level "seeded" API/signal from randomdev.
 *
 * This is our warning that a request is coming: we need to be seeded.  In
 * fenestrasX, a request for random bytes _never_ fails.  "We (ed: ditto) have
 * observed that there are many callers that never check for the error code,
 * even if they are generating cryptographic key material." (§ 1.6)
 *
 * If we returned 'false', both read_random(9) and chacha20_randomstir()
 * (arc4random(9)) will blindly charge on with something almost certainly worse
 * than what we've got, or are able to get quickly enough.
 */
bool
fxrng_alg_seeded(void)
{
	uint8_t hash[FXRNG_HASH_SZ];
	sbintime_t sbt;

	/* The vast majority of the time, we expect to already be seeded. */
	if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
		return (true);

	/*
	 * Take the lock and recheck; only one thread needs to do the initial
	 * seeding work.
	 */
	FXENT_LOCK();
	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		FXENT_UNLOCK();
		return (true);
	}
	/* XXX Any one-off initial seeding goes here. */

	fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
	fxrng_hash_init(&fxent_pool[0]);

	fxrng_brng_reseed(hash, sizeof(hash));
	FXENT_UNLOCK();

	randomdev_unblock();
	explicit_bzero(hash, sizeof(hash));

	/*
	 * This may be called too early for taskqueue_thread to be initialized.
	 * fxent_pool_timer_init will detect if we've already unblocked and
	 * queue the first timer reseed at that point.
	 */
	if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	return (true);
}

/*
 * Timer-based reseeds and pool expansion.
 */
static void
fxent_timer_reseed_npools(unsigned n)
{
	/*
	 * 64 * 8 => moderately large 512 bytes.  Could be static, as we are
	 * only used in a static context.  On the other hand, this is in
	 * taskqueue TASK context and we're likely nearly at top of stack
	 * already.
	 */
	uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
	unsigned i;

	ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);

	FXENT_ASSERT();
	/*
	 * Collect entropy from pools 0..n-1 by concatenating the output hashes
	 * and then feeding them into fxrng_brng_reseed, which will hash the
	 * aggregate together with the current root PRNG keystate to produce a
	 * new key.  It will also bump the global generation counter
	 * appropriately.
	 */
	for (i = 0; i < n; i++) {
		fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ,
		    FXRNG_HASH_SZ);
		fxrng_hash_init(&fxent_pool[i]);
	}

	fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
	explicit_bzero(hash, n * FXRNG_HASH_SZ);

	/*
	 * "The first 32 bytes produced by a high entropy source after a reseed
	 * from the pools is always put in pool 0." (§ 3.4)
	 *
	 * So here we reset the tracking (somewhat naively given the majority
	 * of sources on most machines are not what we consider "high", but at
	 * 32 bytes it's smaller than a cache line), so the next 32 bytes are
	 * prioritized into pool0.
	 *
	 * See corresponding use of fxrng_reseed_seen in fxrng_event_processor.
	 */
	memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
	FXENT_ASSERT();
}

static void
fxent_timer_reseed(void *ctx __unused, int pending __unused)
{
	static unsigned reseed_intvl_sec = 1;
	/* Only reseeds after FXENT_RESEED_INTVL_MAX is achieved. */
	static uint64_t reseed_number = 1;

	unsigned next_ival, i, k;
	sbintime_t sbt;

	if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
		next_ival = FXENT_RESSED_INTVL_GFACT * reseed_intvl_sec;
		if (next_ival > FXENT_RESEED_INTVL_MAX)
			next_ival = FXENT_RESEED_INTVL_MAX;
		FXENT_LOCK();
		fxent_timer_reseed_npools(1);
		FXENT_UNLOCK();
	} else {
		/*
		 * The creation of entropy pools beyond 0 is enabled when the
		 * reseed interval hits the maximum. (§ 3.3)
		 */
		next_ival = reseed_intvl_sec;

		/*
		 * Pool 0 is used every reseed; pool 1..0 every 3rd reseed; and in
		 * general, pool n..0 every 3^n reseeds.
		 */
		k = reseed_number;
		reseed_number++;

		/* Count how many pools, from [0, i), to use for reseed. */
		for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
			if ((k % FXENT_RESEED_BASE) != 0)
				break;
			k /= FXENT_RESEED_BASE;
		}
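
		/*
		 * For example, on the 9th post-expansion reseed
		 * (reseed_number == 9): 9 % 3 == 0 and 3 % 3 == 0 but
		 * 1 % 3 != 0, so the loop stops at i == 3.  If pool 2 is
		 * already active, pools [0, 3) feed this reseed; otherwise
		 * pool 2 is activated below and pools [0, 2) are used.
		 */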

		/*
		 * If we haven't activated pool i yet, activate it and only
		 * reseed from [0, i-1).  (§ 3.3)
		 */
		FXENT_LOCK();
		if (i == fxent_nactpools + 1) {
			fxent_timer_reseed_npools(fxent_nactpools);
			fxent_nactpools++;
		} else {
			/* Just reseed from [0, i). */
			fxent_timer_reseed_npools(i);
		}
		FXENT_UNLOCK();
	}

	/* Schedule the next reseed. */
	sbt = next_ival * SBT_1S;
	taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
	    -sbt, (sbt / 3), C_PREL(2));

	reseed_intvl_sec = next_ival;
}

static void
fxent_pool_timer_init(void *dummy __unused)
{
	sbintime_t sbt;

	TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
	    fxent_timer_reseed, NULL);

	if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
		sbt = SBT_1S;
		taskqueue_enqueue_timeout_sbt(taskqueue_thread,
		    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
	}
	atomic_store_rel_int(&fxent_timer_ready, 1);
}
/* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */
SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
    fxent_pool_timer_init, NULL);