1/*-
2 * Copyright (c) 2017 Oliver Pinter
3 * Copyright (c) 2000-2015 Mark R V Murray
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer
11 *    in this position and unchanged.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/bus.h>
35#include <sys/conf.h>
36#include <sys/fcntl.h>
37#include <sys/filio.h>
38#include <sys/kernel.h>
39#include <sys/kthread.h>
40#include <sys/lock.h>
41#include <sys/module.h>
42#include <sys/malloc.h>
43#include <sys/poll.h>
44#include <sys/proc.h>
45#include <sys/random.h>
46#include <sys/sbuf.h>
47#include <sys/selinfo.h>
48#include <sys/sysctl.h>
49#include <sys/systm.h>
50#include <sys/uio.h>
51#include <sys/unistd.h>
52
53#include <crypto/rijndael/rijndael-api-fst.h>
54#include <crypto/sha2/sha256.h>
55
56#include <dev/random/hash.h>
57#include <dev/random/randomdev.h>
58#include <dev/random/random_harvestq.h>
59
/* Unit number passed to make_dev_credf(9); there is only one random device. */
#define	RANDOM_UNIT	0

/*
 * In loadable random, the core randomdev.c / random(9) routines have static
 * visibility and an alternative name to avoid conflicting with the function
 * pointers of the real names in the core kernel.  random_alg_context_init
 * installs pointers to the loadable static names into the core kernel's
 * function pointers at SI_SUB_RANDOM:SI_ORDER_SECOND.
 */
#if defined(RANDOM_LOADABLE)
/*
 * The parenthesized declarators keep any function-like macros of the same
 * names from expanding here.
 */
static int (read_random_uio)(struct uio *, bool);
static void (read_random)(void *, u_int);
static bool (is_random_seeded)(void);
#endif

/* cdevsw method forward declarations for /dev/random. */
static d_read_t randomdev_read;
static d_write_t randomdev_write;
static d_poll_t randomdev_poll;
static d_ioctl_t randomdev_ioctl;
79
/*
 * Character-device switch for /dev/random (and its /dev/urandom alias);
 * registered via make_dev_credf(9) in randomdev_modevent().
 */
static struct cdevsw random_cdevsw = {
	.d_name = "random",
	.d_version = D_VERSION,
	.d_read = randomdev_read,
	.d_write = randomdev_write,
	.d_poll = randomdev_poll,
	.d_ioctl = randomdev_ioctl,
};

/* For use with make_dev(9)/destroy_dev(9). */
static struct cdev *random_dev;
91
92#if defined(RANDOM_LOADABLE)
93static void
94random_alg_context_init(void *dummy __unused)
95{
96	_read_random_uio = (read_random_uio);
97	_read_random = (read_random);
98	_is_random_seeded = (is_random_seeded);
99}
100SYSINIT(random_device, SI_SUB_RANDOM, SI_ORDER_SECOND, random_alg_context_init,
101    NULL);
102#endif
103
104static struct selinfo rsel;
105
106/*
107 * This is the read uio(9) interface for random(4).
108 */
109/* ARGSUSED */
110static int
111randomdev_read(struct cdev *dev __unused, struct uio *uio, int flags)
112{
113
114	return ((read_random_uio)(uio, (flags & O_NONBLOCK) != 0));
115}
116
117/*
118 * If the random device is not seeded, blocks until it is seeded.
119 *
120 * Returns zero when the random device is seeded.
121 *
122 * If the 'interruptible' parameter is true, and the device is unseeded, this
123 * routine may be interrupted.  If interrupted, it will return either ERESTART
124 * or EINTR.
125 */
126#define SEEDWAIT_INTERRUPTIBLE		true
127#define SEEDWAIT_UNINTERRUPTIBLE	false
128static int
129randomdev_wait_until_seeded(bool interruptible)
130{
131	int error, spamcount, slpflags;
132
133	slpflags = interruptible ? PCATCH : 0;
134
135	error = 0;
136	spamcount = 0;
137	while (!p_random_alg_context->ra_seeded()) {
138		/* keep tapping away at the pre-read until we seed/unblock. */
139		p_random_alg_context->ra_pre_read();
140		/* Only bother the console every 10 seconds or so */
141		if (spamcount == 0)
142			printf("random: %s unblock wait\n", __func__);
143		spamcount = (spamcount + 1) % 100;
144		error = tsleep(p_random_alg_context, slpflags, "randseed",
145		    hz / 10);
146		if (error == ERESTART || error == EINTR) {
147			KASSERT(interruptible,
148			    ("unexpected wake of non-interruptible sleep"));
149			break;
150		}
151		/* Squash tsleep timeout condition */
152		if (error == EWOULDBLOCK)
153			error = 0;
154		KASSERT(error == 0, ("unexpected tsleep error %d", error));
155	}
156	return (error);
157}
158
/*
 * Fill the caller's uio with up to uio_resid bytes of random data.
 *
 * If the device is unseeded: returns EWOULDBLOCK when 'nonblock' is true,
 * otherwise sleeps interruptibly until seeded (so ERESTART/EINTR can be
 * returned from the pre-read wait).  Once data generation has begun, a
 * signal interrupt produces a short read (error squashed to 0), never an
 * error; the uio reflects the actual transfer.
 */
int
(read_random_uio)(struct uio *uio, bool nonblock)
{
	/* 16 MiB takes about 0.08 s CPU time on my 2017 AMD Zen CPU */
#define SIGCHK_PERIOD (16 * 1024 * 1024)
	const size_t sigchk_period = SIGCHK_PERIOD;
	CTASSERT(SIGCHK_PERIOD % PAGE_SIZE == 0);
#undef SIGCHK_PERIOD

	uint8_t *random_buf;
	size_t total_read, read_len;
	ssize_t bufsize;
	int error;


	KASSERT(uio->uio_rw == UIO_READ, ("%s: bogus write", __func__));
	KASSERT(uio->uio_resid >= 0, ("%s: bogus negative resid", __func__));

	p_random_alg_context->ra_pre_read();
	error = 0;
	/* (Un)Blocking logic */
	if (!p_random_alg_context->ra_seeded()) {
		if (nonblock)
			error = EWOULDBLOCK;
		else
			error = randomdev_wait_until_seeded(
			    SEEDWAIT_INTERRUPTIBLE);
	}
	if (error != 0)
		return (error);

	/* Account the request with the harvest rate counter (32-bit words). */
	read_rate_increment(howmany(uio->uio_resid + 1, sizeof(uint32_t)));
	total_read = 0;

	/* Easy to deal with the trivial 0 byte case. */
	if (__predict_false(uio->uio_resid == 0))
		return (0);

	/*
	 * If memory is plentiful, use maximally sized requests to avoid
	 * per-call algorithm overhead.  But fall back to a single page
	 * allocation if the full request isn't immediately available.
	 */
	bufsize = MIN(sigchk_period, (size_t)uio->uio_resid);
	random_buf = malloc(bufsize, M_ENTROPY, M_NOWAIT);
	if (random_buf == NULL) {
		bufsize = PAGE_SIZE;
		random_buf = malloc(bufsize, M_ENTROPY, M_WAITOK);
	}

	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		read_len = MIN((size_t)uio->uio_resid, bufsize);

		p_random_alg_context->ra_read(random_buf, read_len);

		/*
		 * uiomove() may yield the CPU before each 'read_len' bytes (up
		 * to bufsize) are copied out.
		 */
		error = uiomove(random_buf, read_len, uio);
		total_read += read_len;

		/*
		 * Poll for signals every few MBs to avoid very long
		 * uninterruptible syscalls.
		 */
		if (error == 0 && uio->uio_resid != 0 &&
		    total_read % sigchk_period == 0) {
			/* Zero-length catchable sleep; only checks signals. */
			error = tsleep_sbt(p_random_alg_context, PCATCH,
			    "randrd", SBT_1NS, 0, C_HARDCLOCK);
			/* Squash tsleep timeout condition */
			if (error == EWOULDBLOCK)
				error = 0;
		}
	}

	/*
	 * Short reads due to signal interrupt should not indicate error.
	 * Instead, the uio will reflect that the read was shorter than
	 * requested.
	 */
	if (error == ERESTART || error == EINTR)
		error = 0;

	/* zfree() zeroes the buffer before freeing; don't leak output. */
	zfree(random_buf, M_ENTROPY);
	return (error);
}
247
248/*-
249 * Kernel API version of read_random().  This is similar to read_random_uio(),
250 * except it doesn't interface with uio(9).  It cannot assumed that random_buf
251 * is a multiple of RANDOM_BLOCKSIZE bytes.
252 *
253 * If the tunable 'kern.random.initial_seeding.bypass_before_seeding' is set
254 * non-zero, silently fail to emit random data (matching the pre-r346250
255 * behavior).  If read_random is called prior to seeding and bypassed because
256 * of this tunable, the condition is reported in the read-only sysctl
257 * 'kern.random.initial_seeding.read_random_bypassed_before_seeding'.
258 */
void
(read_random)(void *random_buf, u_int len)
{

	KASSERT(random_buf != NULL, ("No suitable random buffer in %s", __func__));
	p_random_alg_context->ra_pre_read();

	/* Nothing to produce; pre-read above still got its tap. */
	if (len == 0)
		return;

	/* (Un)Blocking logic */
	if (__predict_false(!p_random_alg_context->ra_seeded())) {
		if (random_bypass_before_seeding) {
			/* Warn at most once (unless warnings are disabled). */
			if (!read_random_bypassed_before_seeding) {
				if (!random_bypass_disable_warnings)
					printf("read_random: WARNING: bypassing"
					    " request for random data because "
					    "the random device is not yet "
					    "seeded and the knob "
					    "'bypass_before_seeding' was "
					    "enabled.\n");
				/* Reported via the read-only sysctl. */
				read_random_bypassed_before_seeding = true;
			}
			/* Avoid potentially leaking stack garbage */
			memset(random_buf, 0, len);
			return;
		}

		/* Kernel consumer: wait without PCATCH until seeded. */
		(void)randomdev_wait_until_seeded(SEEDWAIT_UNINTERRUPTIBLE);
	}
	/* Account the request with the harvest rate counter (32-bit words). */
	read_rate_increment(roundup2(len, sizeof(uint32_t)));
	p_random_alg_context->ra_read(random_buf, len);
}
292
293bool
294(is_random_seeded)(void)
295{
296	return (p_random_alg_context->ra_seeded());
297}
298
/*
 * Fold user-supplied bytes into the entropy pool: hash the buffer bracketed
 * by two cycle-counter samples, then feed the digest to the algorithm as a
 * series of RANDOM_CACHED harvest events.
 *
 * NOTE(review): the static locals assume calls are serialized (currently only
 * the randomdev_write() path) — confirm before adding other callers.
 */
static __inline void
randomdev_accumulate(uint8_t *buf, u_int count)
{
	static u_int destination = 0;
	static struct harvest_event event;
	static struct randomdev_hash hash;
	static uint32_t entropy_data[RANDOM_KEYSIZE_WORDS];
	uint32_t timestamp;
	int i;

	/* Extra timing here is helpful to scrape scheduler jitter entropy */
	randomdev_hash_init(&hash);
	timestamp = (uint32_t)get_cyclecount();
	randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
	randomdev_hash_iterate(&hash, buf, count);
	timestamp = (uint32_t)get_cyclecount();
	randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
	randomdev_hash_finish(&hash, entropy_data);
	/* Emit the digest in he_entropy-sized chunks. */
	for (i = 0; i < RANDOM_KEYSIZE_WORDS; i += sizeof(event.he_entropy)/sizeof(event.he_entropy[0])) {
		event.he_somecounter = (uint32_t)get_cyclecount();
		event.he_size = sizeof(event.he_entropy);
		event.he_source = RANDOM_CACHED;
		event.he_destination = destination++; /* Harmless cheating */
		memcpy(event.he_entropy, entropy_data + i, sizeof(event.he_entropy));
		p_random_alg_context->ra_event_processor(&event);
	}
	/* Scrub the (static) intermediate state. */
	explicit_bzero(&event, sizeof(event));
	explicit_bzero(entropy_data, sizeof(entropy_data));
}
328
329/* ARGSUSED */
330static int
331randomdev_write(struct cdev *dev __unused, struct uio *uio, int flags __unused)
332{
333	uint8_t *random_buf;
334	int c, error = 0;
335	ssize_t nbytes;
336
337	random_buf = malloc(PAGE_SIZE, M_ENTROPY, M_WAITOK);
338	nbytes = uio->uio_resid;
339	while (uio->uio_resid > 0 && error == 0) {
340		c = MIN(uio->uio_resid, PAGE_SIZE);
341		error = uiomove(random_buf, c, uio);
342		if (error)
343			break;
344		randomdev_accumulate(random_buf, c);
345		tsleep(p_random_alg_context, 0, "randwr", hz/10);
346	}
347	if (nbytes != uio->uio_resid && (error == ERESTART || error == EINTR))
348		/* Partial write, not error. */
349		error = 0;
350	free(random_buf, M_ENTROPY);
351	return (error);
352}
353
354/* ARGSUSED */
355static int
356randomdev_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
357{
358
359	if (events & (POLLIN | POLLRDNORM)) {
360		if (p_random_alg_context->ra_seeded())
361			events &= (POLLIN | POLLRDNORM);
362		else
363			selrecord(td, &rsel);
364	}
365	return (events);
366}
367
/* This will be called by the entropy processor when it seeds itself and becomes secure */
void
randomdev_unblock(void)
{

	/* Wake select/poll sleepers recorded by randomdev_poll()... */
	selwakeuppri(&rsel, PUSER);
	/* ...and tsleep() waiters sleeping on the algorithm context. */
	wakeup(p_random_alg_context);
	printf("random: unblocking device.\n");
#ifndef RANDOM_FENESTRASX
	/* Do random(9) a favour while we are about it. */
	(void)atomic_cmpset_int(&arc4rand_iniseed_state, ARC4_ENTR_NONE, ARC4_ENTR_HAVE);
#endif
}
381
382/* ARGSUSED */
383static int
384randomdev_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr __unused,
385    int flags __unused, struct thread *td __unused)
386{
387	int error = 0;
388
389	switch (cmd) {
390		/* Really handled in upper layer */
391	case FIOASYNC:
392	case FIONBIO:
393		break;
394	default:
395		error = ENOTTY;
396	}
397
398	return (error);
399}
400
401/* ARGSUSED */
402static int
403randomdev_modevent(module_t mod __unused, int type, void *data __unused)
404{
405	int error = 0;
406
407	switch (type) {
408	case MOD_LOAD:
409		printf("random: entropy device external interface\n");
410		random_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &random_cdevsw,
411		    RANDOM_UNIT, NULL, UID_ROOT, GID_WHEEL, 0644, "random");
412		make_dev_alias(random_dev, "urandom"); /* compatibility */
413		break;
414	case MOD_UNLOAD:
415		error = EBUSY;
416		break;
417	case MOD_SHUTDOWN:
418		break;
419	default:
420		error = EOPNOTSUPP;
421		break;
422	}
423	return (error);
424}
425
/* Loadable-module glue: name, event handler, no extra argument. */
static moduledata_t randomdev_mod = {
	"random_device",
	randomdev_modevent,
	0
};

/* Attach at SI_SUB_DRIVERS so the device node exists early. */
DECLARE_MODULE(random_device, randomdev_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(random_device, 1);
MODULE_DEPEND(random_device, crypto, 1, 1, 1);
MODULE_DEPEND(random_device, random_harvestq, 1, 1, 1);
436