1/* 2 * random.c -- A strong random number generator 3 * 4 * Version 1.89, last modified 19-Sep-99 5 * 6 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All 7 * rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, and the entire permission notice in its entirety, 14 * including the disclaimer of warranties. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. The name of the author may not be used to endorse or promote 19 * products derived from this software without specific prior 20 * written permission. 21 * 22 * ALTERNATIVELY, this product may be distributed under the terms of 23 * the GNU General Public License, in which case the provisions of the GPL are 24 * required INSTEAD OF the above restrictions. (This clause is 25 * necessary due to a potential bad interaction between the GPL and 26 * the restrictions contained in a BSD-style copyright.) 27 * 28 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 29 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF 31 * WHICH ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE 32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 34 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 36 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 38 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH 39 * DAMAGE. 40 */ 41 42/* 43 * (now, with legal B.S. out of the way.....) 44 * 45 * This routine gathers environmental noise from device drivers, etc., 46 * and returns good random numbers, suitable for cryptographic use. 47 * Besides the obvious cryptographic uses, these numbers are also good 48 * for seeding TCP sequence numbers, and other places where it is 49 * desirable to have numbers which are not only random, but hard to 50 * predict by an attacker. 51 * 52 * Theory of operation 53 * =================== 54 * 55 * Computers are very predictable devices. Hence it is extremely hard 56 * to produce truly random numbers on a computer --- as opposed to 57 * pseudo-random numbers, which can easily generated by using a 58 * algorithm. Unfortunately, it is very easy for attackers to guess 59 * the sequence of pseudo-random number generators, and for some 60 * applications this is not acceptable. So instead, we must try to 61 * gather "environmental noise" from the computer's environment, which 62 * must be hard for outside attackers to observe, and use that to 63 * generate random numbers. In a Unix environment, this is best done 64 * from inside the kernel. 65 * 66 * Sources of randomness from the environment include inter-keyboard 67 * timings, inter-interrupt timings from some interrupts, and other 68 * events which are both (a) non-deterministic and (b) hard for an 69 * outside observer to measure. 
Randomness from these sources is
97 * 98 * Exported interfaces ---- output 99 * =============================== 100 * 101 * There are three exported interfaces; the first is one designed to 102 * be used from within the kernel: 103 * 104 * void get_random_bytes(void *buf, int nbytes); 105 * 106 * This interface will return the requested number of random bytes, 107 * and place it in the requested buffer. 108 * 109 * The two other interfaces are two character devices /dev/random and 110 * /dev/urandom. /dev/random is suitable for use when very high 111 * quality randomness is desired (for example, for key generation or 112 * one-time pads), as it will only return a maximum of the number of 113 * bits of randomness (as estimated by the random number generator) 114 * contained in the entropy pool. 115 * 116 * The /dev/urandom device does not have this limit, and will return 117 * as many bytes as are requested. As more and more random bytes are 118 * requested without giving time for the entropy pool to recharge, 119 * this will result in random numbers that are merely cryptographically 120 * strong. For many applications, however, this is acceptable. 121 * 122 * Exported interfaces ---- input 123 * ============================== 124 * 125 * The current exported interfaces for gathering environmental noise 126 * from the devices are: 127 * 128 * void add_keyboard_randomness(unsigned char scancode); 129 * void add_mouse_randomness(__u32 mouse_data); 130 * void add_interrupt_randomness(int irq); 131 * void add_blkdev_randomness(int irq); 132 * 133 * add_keyboard_randomness() uses the inter-keypress timing, as well as the 134 * scancode as random inputs into the "entropy pool". 135 * 136 * add_mouse_randomness() uses the mouse interrupt timing, as well as 137 * the reported position of the mouse from the hardware. 138 * 139 * add_interrupt_randomness() uses the inter-interrupt timing as random 140 * inputs to the entropy pool. Note that not all interrupts are good 141 * sources of randomness! 
For example, the timer interrupts is not a 142 * good choice, because the periodicity of the interrupts is too 143 * regular, and hence predictable to an attacker. Disk interrupts are 144 * a better measure, since the timing of the disk interrupts are more 145 * unpredictable. 146 * 147 * add_blkdev_randomness() times the finishing time of block requests. 148 * 149 * All of these routines try to estimate how many bits of randomness a 150 * particular randomness source. They do this by keeping track of the 151 * first and second order deltas of the event timings. 152 * 153 * Ensuring unpredictability at system startup 154 * ============================================ 155 * 156 * When any operating system starts up, it will go through a sequence 157 * of actions that are fairly predictable by an adversary, especially 158 * if the start-up does not involve interaction with a human operator. 159 * This reduces the actual number of bits of unpredictability in the 160 * entropy pool below the value in entropy_count. In order to 161 * counteract this effect, it helps to carry information in the 162 * entropy pool across shut-downs and start-ups. To do this, put the 163 * following lines an appropriate script which is run during the boot 164 * sequence: 165 * 166 * echo "Initializing random number generator..." 
167 * random_seed=/var/run/random-seed 168 * # Carry a random seed from start-up to start-up 169 * # Load and then save the whole entropy pool 170 * if [ -f $random_seed ]; then 171 * cat $random_seed >/dev/urandom 172 * else 173 * touch $random_seed 174 * fi 175 * chmod 600 $random_seed 176 * poolfile=/proc/sys/kernel/random/poolsize 177 * [ -r $poolfile ] && bytes=`cat $poolfile` || bytes=512 178 * dd if=/dev/urandom of=$random_seed count=1 bs=bytes 179 * 180 * and the following lines in an appropriate script which is run as 181 * the system is shutdown: 182 * 183 * # Carry a random seed from shut-down to start-up 184 * # Save the whole entropy pool 185 * echo "Saving random seed..." 186 * random_seed=/var/run/random-seed 187 * touch $random_seed 188 * chmod 600 $random_seed 189 * poolfile=/proc/sys/kernel/random/poolsize 190 * [ -r $poolfile ] && bytes=`cat $poolfile` || bytes=512 191 * dd if=/dev/urandom of=$random_seed count=1 bs=bytes 192 * 193 * For example, on most modern systems using the System V init 194 * scripts, such code fragments would be found in 195 * /etc/rc.d/init.d/random. On older Linux systems, the correct script 196 * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0. 197 * 198 * Effectively, these commands cause the contents of the entropy pool 199 * to be saved at shut-down time and reloaded into the entropy pool at 200 * start-up. (The 'dd' in the addition to the bootup script is to 201 * make sure that /etc/random-seed is different for every start-up, 202 * even if the system crashes without executing rc.0.) Even with 203 * complete knowledge of the start-up activities, predicting the state 204 * of the entropy pool requires knowledge of the previous history of 205 * the system. 206 * 207 * Configuring the /dev/random driver under Linux 208 * ============================================== 209 * 210 * The /dev/random driver under Linux uses minor numbers 8 and 9 of 211 * the /dev/mem major number (#1). 
So if your system does not have 212 * /dev/random and /dev/urandom created already, they can be created 213 * by using the commands: 214 * 215 * mknod /dev/random c 1 8 216 * mknod /dev/urandom c 1 9 217 * 218 * Acknowledgements: 219 * ================= 220 * 221 * Ideas for constructing this random number generator were derived 222 * from Pretty Good Privacy's random number generator, and from private 223 * discussions with Phil Karn. Colin Plumb provided a faster random 224 * number generator, which speed up the mixing function of the entropy 225 * pool, taken from PGPfone. Dale Worley has also contributed many 226 * useful ideas and suggestions to improve this driver. 227 * 228 * Any flaws in the design are solely my responsibility, and should 229 * not be attributed to the Phil, Colin, or any of authors of PGP. 230 * 231 * The code for SHA transform was taken from Peter Gutmann's 232 * implementation, which has been placed in the public domain. 233 * The code for MD5 transform was taken from Colin Plumb's 234 * implementation, which has been placed in the public domain. 235 * The MD5 cryptographic checksum was devised by Ronald Rivest, and is 236 * documented in RFC 1321, "The MD5 Message Digest Algorithm". 237 * 238 * Further background information on this topic may be obtained from 239 * RFC 1750, "Randomness Recommendations for Security", by Donald 240 * Eastlake, Steve Crocker, and Jeff Schiller. 
 */

#include <linux/utsname.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>

/*
 * Configuration information
 */
#define DEFAULT_POOL_SIZE 512		/* bytes in the primary entropy pool */
#define SECONDARY_POOL_SIZE 128		/* bytes in the secondary (/dev/urandom) pool */
#define BATCH_ENTROPY_SIZE 256		/* slots in the batched-entropy ring buffer */
#define USE_SHA				/* select SHA (rather than MD5) as the output hash */

/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should always be at least 8, or at least 1 byte.
 */
static int random_read_wakeup_thresh = 8;

/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
static int random_write_wakeup_thresh = 128;

/*
 * A pool of size .poolwords is stirred with a primitive polynomial
 * of degree .poolwords over GF(2).  The taps for various sizes are
 * defined below.  They are chosen to be evenly spaced (minimum RMS
 * distance from evenly spaced; the numbers in the comments are a
 * scaled squared error sum) except for the last tap, which is 1 to
 * get the twisting happening as fast as possible.
 */
static struct poolinfo {
	int poolwords;				/* pool size in 32-bit words (a power of 2) */
	int tap1, tap2, tap3, tap4, tap5;	/* feedback tap offsets, in words */
} poolinfo_table[] = {
	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
	{ 2048,	1638,	1231,	819,	411,	1 },

	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
	{ 1024,	817,	615,	412,	204,	1 },

	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
	{ 512,	411,	308,	208,	104,	1 },

	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
	{ 256,	205,	155,	101,	52,	1 },

	/* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
	{ 128,	103,	76,	51,	25,	1 },

	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
	{ 64,	52,	39,	26,	14,	1 },

	/* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
	{ 32,	26,	20,	14,	7,	1 },

	{ 0,	0,	0,	0,	0,	0 },
};

/*
 * NOTE: these two macros deliberately have NO parentheses.  They are
 * always used after a "." member access (e.g. r->poolinfo.POOLBITS
 * expands to r->poolinfo.poolwords*32, and inside create_entropy_store
 * POOLBYTES picks up the local variable named poolwords), so adding
 * parentheses would produce a syntax error at every use site.
 */
#define POOLBITS	poolwords*32
#define POOLBYTES	poolwords*4

/*
 * For the purposes of better mixing, we use the CRC-32 polynomial as
 * well to make a twisted Generalized Feedback Shift Register
 *
 * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
 * Transactions on Modeling and Computer Simulation 2(3):179-194.
 * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
 * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
 *
 * Thanks to Colin Plumb for suggesting this.
 *
 * We have not analyzed the resultant polynomial to prove it primitive;
 * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
 * of a random large-degree polynomial over GF(2) are more than large enough
 * that periodicity is not a concern.
 *
 * The input hash is much less sensitive than the output hash.  All
 * that we want of it is that it be a good non-cryptographic hash;
 * i.e. it does not produce collisions when fed "random" data of the sort
 * we expect to see.
As long as the pool state differs for different 340 * inputs, we have preserved the input entropy and done a good job. 341 * The fact that an intelligent attacker can construct inputs that 342 * will produce controlled alterations to the pool's state is not 343 * important because we don't consider such inputs to contribute any 344 * randomness. The only property we need with respect to them is that 345 * the attacker can't increase his/her knowledge of the pool's state. 346 * Since all additions are reversible (knowing the final state and the 347 * input, you can reconstruct the initial state), if an attacker has 348 * any uncertainty about the initial state, he/she can only shuffle 349 * that uncertainty about, but never cause any collisions (which would 350 * decrease the uncertainty). 351 * 352 * The chosen system lets the state of the pool be (essentially) the input 353 * modulo the generator polymnomial. Now, for random primitive polynomials, 354 * this is a universal class of hash functions, meaning that the chance 355 * of a collision is limited by the attacker's knowledge of the generator 356 * polynomail, so if it is chosen at random, an attacker can never force 357 * a collision. Here, we use a fixed polynomial, but we *can* assume that 358 * ###--> it is unknown to the processes generating the input entropy. <-### 359 * Because of this important property, this is a good, collision-resistant 360 * hash; hash collisions will occur no more often than chance. 
361 */ 362 363/* 364 * Linux 2.2 compatibility 365 */ 366#ifndef DECLARE_WAITQUEUE 367#define DECLARE_WAITQUEUE(WAIT, PTR) struct wait_queue WAIT = { PTR, NULL } 368#endif 369#ifndef DECLARE_WAIT_QUEUE_HEAD 370#define DECLARE_WAIT_QUEUE_HEAD(WAIT) struct wait_queue *WAIT 371#endif 372 373/* 374 * Static global variables 375 */ 376static struct entropy_store *random_state; /* The default global store */ 377static struct entropy_store *sec_random_state; /* secondary store */ 378static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); 379static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); 380 381/* 382 * Forward procedure declarations 383 */ 384#ifdef CONFIG_SYSCTL 385static void sysctl_init_random(struct entropy_store *random_state); 386#endif 387 388/***************************************************************** 389 * 390 * Utility functions, with some ASM defined functions for speed 391 * purposes 392 * 393 *****************************************************************/ 394 395/* 396 * Unfortunately, while the GCC optimizer for the i386 understands how 397 * to optimize a static rotate left of x bits, it doesn't know how to 398 * deal with a variable rotate of x bits. So we use a bit of asm magic. 399 */ 400#if !defined(__i386__) 401static inline __u32 rotate_left(int i, __u32 word) 402{ 403 return (word << i) | (word >> (32 - i)); 404 405} 406#else 407static inline __u32 rotate_left(int i, __u32 word) 408{ 409 __asm__("roll %%cl,%0" 410 :"=r" (word) 411 :"0" (word),"c" (i)); 412 return word; 413} 414#endif 415 416/* 417 * More asm magic.... 418 * 419 * For entropy estimation, we need to do an integral base 2 420 * logarithm. 421 * 422 * Note the "12bits" suffix - this is used for numbers between 423 * 0 and 4095 only. This allows a few shortcuts. 
 */
/*
 * Integral base-2 logarithm for 12-bit inputs (0..4095 only): returns
 * floor(log2(word)) for word >= 1, and 0 for word == 0.  Used by
 * add_timer_randomness() to turn a timing delta into an entropy
 * estimate in bits.
 */
static inline __u32 int_ln_12bits(__u32 word)
{
	/* Smear msbit right to make an n-bit mask */
	word |= word >> 8;
	word |= word >> 4;
	word |= word >> 2;
	word |= word >> 1;
	/* Remove one bit to make this a logarithm */
	word >>= 1;
	/* Count the bits set in the word (12-bit parallel popcount) */
	word -= (word >> 1) & 0x555;
	word = (word & 0x333) + ((word >> 2) & 0x333);
	word += (word >> 4);
	word += (word >> 8);
	return word & 15;
}

/* Entropy-accounting debug output; compiled out by default. */
#define DEBUG_ENT(fmt, arg...) do {} while (0)

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store {
	unsigned add_ptr;		/* next pool word to mix into; counts DOWN
					 * (see add_entropy_words) */
	int entropy_count;		/* estimated entropy in the pool, in bits;
					 * clamped to [0, POOLBITS] */
	int input_rotate;		/* current input rotation amount, 0..31 */
	int extract_count;		/* extraction counter; reset by
					 * clear_entropy_store (its consumer is not
					 * visible in this chunk) */
	struct poolinfo poolinfo;	/* pool size and feedback tap positions */
	__u32 *pool;			/* the pool data itself, poolwords words */
};

/*
 * Initialize the entropy store.  The input argument is the size of
 * the random pool.
 *
 * Returns a negative error if there is a problem.
465 */ 466static int create_entropy_store(int size, struct entropy_store **ret_bucket) 467{ 468 struct entropy_store *r; 469 struct poolinfo *p; 470 int poolwords; 471 472 poolwords = (size + 3) / 4; /* Convert bytes->words */ 473 /* The pool size must be a multiple of 16 32-bit words */ 474 poolwords = ((poolwords + 15) / 16) * 16; 475 476 for (p = poolinfo_table; p->poolwords; p++) { 477 if (poolwords == p->poolwords) 478 break; 479 } 480 if (p->poolwords == 0) 481 return -EINVAL; 482 483 r = kmalloc(sizeof(struct entropy_store), GFP_KERNEL); 484 if (!r) 485 return -ENOMEM; 486 487 memset (r, 0, sizeof(struct entropy_store)); 488 r->poolinfo = *p; 489 490 r->pool = kmalloc(POOLBYTES, GFP_KERNEL); 491 if (!r->pool) { 492 kfree(r); 493 return -ENOMEM; 494 } 495 memset(r->pool, 0, POOLBYTES); 496 *ret_bucket = r; 497 return 0; 498} 499 500/* Clear the entropy pool and associated counters. */ 501static void clear_entropy_store(struct entropy_store *r) 502{ 503 r->add_ptr = 0; 504 r->entropy_count = 0; 505 r->input_rotate = 0; 506 r->extract_count = 0; 507 memset(r->pool, 0, r->poolinfo.POOLBYTES); 508} 509 510static void free_entropy_store(struct entropy_store *r) 511{ 512 if (r->pool) 513 kfree(r->pool); 514 kfree(r); 515} 516 517/* 518 * This function adds a byte into the entropy "pool". It does not 519 * update the entropy estimate. The caller should call 520 * credit_entropy_store if this is appropriate. 521 * 522 * The pool is stirred with a primitive polynomial of the appropriate 523 * degree, and then twisted. We twist by three bits at a time because 524 * it's cheap to do so and helps slightly in the expected case where 525 * the entropy is concentrated in the low-order bits. 
 */
static void add_entropy_words(struct entropy_store *r, const __u32 *in,
			      int nwords)
{
	/*
	 * Precomputed multiples of the CRC-32 polynomial, used to fold
	 * the three bits shifted out of the feedback word back in
	 * ("twisting" the GFSR).
	 */
	static __u32 const twist_table[8] = {
		         0, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
	unsigned i;
	int new_rotate;
	int wordmask = r->poolinfo.poolwords - 1;	/* poolwords is a power of 2 */
	__u32 w;

	while (nwords--) {
		w = rotate_left(r->input_rotate, *in++);
		/* add_ptr counts down, so successive words walk the pool backwards */
		i = r->add_ptr = (r->add_ptr - 1) & wordmask;
		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		new_rotate = r->input_rotate + 14;
		if (i)
			new_rotate = r->input_rotate + 7;
		r->input_rotate = new_rotate & 31;

		/* XOR in the various taps */
		w ^= r->pool[(i + r->poolinfo.tap1) & wordmask];
		w ^= r->pool[(i + r->poolinfo.tap2) & wordmask];
		w ^= r->pool[(i + r->poolinfo.tap3) & wordmask];
		w ^= r->pool[(i + r->poolinfo.tap4) & wordmask];
		w ^= r->pool[(i + r->poolinfo.tap5) & wordmask];
		w ^= r->pool[i];
		/* Twist: shift right 3, folding the low 3 bits back via CRC-32 */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
	}
}

/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * nbits may be negative; the count is clamped to [0, POOLBITS].
 */
static void credit_entropy_store(struct entropy_store *r, int nbits)
{
	if (r->entropy_count + nbits < 0) {
		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
			  r->entropy_count, nbits);
		r->entropy_count = 0;
	} else if (r->entropy_count + nbits > r->poolinfo.POOLBITS) {
		r->entropy_count = r->poolinfo.POOLBITS;
	} else {
		r->entropy_count += nbits;
		if (nbits)
			DEBUG_ENT("%s added %d bits, now %d\n",
				  r == sec_random_state ? "secondary" :
				  r == random_state ? "primary" : "unknown",
				  nbits, r->entropy_count);
	}
}

/**********************************************************************
 *
 * Entropy batch input management
 *
 * We batch entropy to be added to avoid increasing interrupt latency
 *
 **********************************************************************/

static __u32	*batch_entropy_pool;	/* ring of (a, b) word pairs */
static int	*batch_entropy_credit;	/* parallel ring of entropy credits */
static int	batch_max;		/* ring capacity; 0 => not initialized */
static int	batch_head, batch_tail;	/* producer / consumer indices */
static struct	tq_struct	batch_tqueue;
static void batch_entropy_process(void *private_);

/* note: the size must be a power of 2 (indices are masked with size-1) */
static int __init batch_entropy_init(int size, struct entropy_store *r)
{
	batch_entropy_pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
	if (!batch_entropy_pool)
		return -1;
	batch_entropy_credit = kmalloc(size*sizeof(int), GFP_KERNEL);
	if (!batch_entropy_credit) {
		kfree(batch_entropy_pool);
		return -1;
	}
	batch_head = batch_tail = 0;
	batch_max = size;
	batch_tqueue.routine = batch_entropy_process;
	batch_tqueue.data = r;
	return 0;
}

/*
 * Changes to the entropy data are put into a queue rather than being added to
 * the entropy counts directly.  This is presumably to avoid doing heavy
 * hashing calculations during an interrupt in add_timer_randomness().
 * Instead, the entropy is only added to the pool once per timer tick.
 */
void batch_entropy_store(u32 a, u32 b, int num)
{
	int new;

	/* batch_max == 0 means batch_entropy_init() has not run yet */
	if (!batch_max)
		return;

	/*
	 * Write the sample into the slot at batch_head; with a ring of
	 * size batch_max, the head slot is always free (full is detected
	 * below, before the head is advanced).
	 */
	batch_entropy_pool[2*batch_head] = a;
	batch_entropy_pool[(2*batch_head) + 1] = b;
	batch_entropy_credit[batch_head] = num;

	new = (batch_head+1) & (batch_max-1);
	if (new != batch_tail) {
		queue_task(&batch_tqueue, &tq_timer);
		batch_head = new;
	} else {
		/* Ring full: drop this sample (head is not advanced). */
		DEBUG_ENT("batch entropy buffer full\n");
	}
}

/*
 * Flush out the accumulated entropy operations, adding entropy to the passed
 * store (normally random_state).  If that store has enough entropy, alternate
 * between randomizing the data of the primary and secondary stores.
 */
static void batch_entropy_process(void *private_)
{
	struct entropy_store *r = (struct entropy_store *) private_, *p;
	int max_entropy = r->poolinfo.POOLBITS;

	if (!batch_max)
		return;

	p = r;	/* remember the original target for the wakeup check below */
	while (batch_head != batch_tail) {
		/* Current store full: switch between primary and secondary */
		if (r->entropy_count >= max_entropy) {
			r = (r == sec_random_state) ? random_state :
				sec_random_state;
			max_entropy = r->poolinfo.POOLBITS;
		}
		add_entropy_words(r, batch_entropy_pool + 2*batch_tail, 2);
		credit_entropy_store(r, batch_entropy_credit[batch_tail]);
		batch_tail = (batch_tail+1) & (batch_max-1);
	}
	if (p->entropy_count >= random_read_wakeup_thresh)
		wake_up_interruptible(&random_read_wait);
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	__u32	last_time;		/* timestamp of the previous event */
	__s32	last_delta,last_delta2;	/* first- and second-order deltas */
	int	dont_count_entropy:1;	/* mix the data in, but credit no entropy */
};

static struct timer_rand_state keyboard_timer_state;
static struct timer_rand_state mouse_timer_state;
static struct timer_rand_state extract_timer_state;
static struct timer_rand_state *irq_timer_state[NR_IRQS];
static struct timer_rand_state *blkdev_timer_state[MAX_BLKDEV];

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 * On the i386, this is assumed to be at most 16 bits, and the high bits
 * are used for a high-resolution timer.
700 * 701 */ 702static void add_timer_randomness(struct timer_rand_state *state, unsigned num) 703{ 704 __u32 time; 705 __s32 delta, delta2, delta3; 706 int entropy = 0; 707 708#if defined(__i386__) 709 if (cpu_has_tsc) { 710 __u32 high; 711 rdtsc(time, high); 712 num ^= high; 713 } else { 714 time = jiffies; 715 } 716#elif defined(__x86_64__) 717 __u32 high; 718 rdtsc(time, high); 719 num ^= high; 720#else 721 time = jiffies; 722#endif 723 724 /* 725 * Calculate number of bits of randomness we probably added. 726 * We take into account the first, second and third-order deltas 727 * in order to make our estimate. 728 */ 729 if (!state->dont_count_entropy) { 730 delta = time - state->last_time; 731 state->last_time = time; 732 733 delta2 = delta - state->last_delta; 734 state->last_delta = delta; 735 736 delta3 = delta2 - state->last_delta2; 737 state->last_delta2 = delta2; 738 739 if (delta < 0) 740 delta = -delta; 741 if (delta2 < 0) 742 delta2 = -delta2; 743 if (delta3 < 0) 744 delta3 = -delta3; 745 if (delta > delta2) 746 delta = delta2; 747 if (delta > delta3) 748 delta = delta3; 749 750 /* 751 * delta is now minimum absolute delta. 752 * Round down by 1 bit on general principles, 753 * and limit entropy entimate to 12 bits. 
754 */ 755 delta >>= 1; 756 delta &= (1 << 12) - 1; 757 758 entropy = int_ln_12bits(delta); 759 } 760 batch_entropy_store(num, time, entropy); 761} 762 763void add_keyboard_randomness(unsigned char scancode) 764{ 765 static unsigned char last_scancode; 766 /* ignore autorepeat (multiple key down w/o key up) */ 767 if (scancode != last_scancode) { 768 last_scancode = scancode; 769 add_timer_randomness(&keyboard_timer_state, scancode); 770 } 771} 772 773void add_mouse_randomness(__u32 mouse_data) 774{ 775 add_timer_randomness(&mouse_timer_state, mouse_data); 776} 777 778void add_interrupt_randomness(int irq) 779{ 780 if (irq >= NR_IRQS || irq_timer_state[irq] == 0) 781 return; 782 783 add_timer_randomness(irq_timer_state[irq], 0x100+irq); 784} 785 786void add_blkdev_randomness(int major) 787{ 788 if (major >= MAX_BLKDEV) 789 return; 790 791 if (blkdev_timer_state[major] == 0) { 792 rand_initialize_blkdev(major, GFP_ATOMIC); 793 if (blkdev_timer_state[major] == 0) 794 return; 795 } 796 797 add_timer_randomness(blkdev_timer_state[major], 0x200+major); 798} 799 800/****************************************************************** 801 * 802 * Hash function definition 803 * 804 *******************************************************************/ 805 806/* 807 * This chunk of code defines a function 808 * void HASH_TRANSFORM(__u32 digest[HASH_BUFFER_SIZE + HASH_EXTRA_SIZE], 809 * __u32 const data[16]) 810 * 811 * The function hashes the input data to produce a digest in the first 812 * HASH_BUFFER_SIZE words of the digest[] array, and uses HASH_EXTRA_SIZE 813 * more words for internal purposes. (This buffer is exported so the 814 * caller can wipe it once rather than this code doing it each call, 815 * and tacking it onto the end of the digest[] array is the quick and 816 * dirty way of doing it.) 
 *
 * It so happens that MD5 and SHA share most of the initial vector
 * used to initialize the digest[] array before the first call:
 *	1) 0x67452301
 *	2) 0xefcdab89
 *	3) 0x98badcfe
 *	4) 0x10325476
 *	5) 0xc3d2e1f0 (SHA only)
 *
 * For /dev/random purposes, the length of the data being hashed is
 * fixed in length, so appending a bit count in the usual way is not
 * cryptographically necessary.
 */

#ifdef USE_SHA

#define HASH_BUFFER_SIZE 5
#define HASH_EXTRA_SIZE 80
#define HASH_TRANSFORM SHATransform

/* Various size/speed tradeoffs are available.  Choose 0..3. */
#define SHA_CODE_SIZE 0

/*
 * SHA transform algorithm, taken from code written by Peter Gutmann,
 * and placed in the public domain.
 */

/* The SHA f()-functions. */

#define f1(x,y,z)   ( z ^ (x & (y^z)) )		/* Rounds  0-19: x ? y : z */
#define f2(x,y,z)   (x ^ y ^ z)			/* Rounds 20-39: XOR */
#define f3(x,y,z)   ( (x & y) + (z & (x ^ y)) )	/* Rounds 40-59: majority */
#define f4(x,y,z)   (x ^ y ^ z)			/* Rounds 60-79: XOR */

/* The SHA Mysterious Constants */

#define K1  0x5A827999L			/* Rounds  0-19: sqrt(2) * 2^30 */
#define K2  0x6ED9EBA1L			/* Rounds 20-39: sqrt(3) * 2^30 */
#define K3  0x8F1BBCDCL			/* Rounds 40-59: sqrt(5) * 2^30 */
#define K4  0xCA62C1D6L			/* Rounds 60-79: sqrt(10) * 2^30 */

/* 32-bit rotate left; n must be 1..31 (n == 0 or 32 would be UB). */
#define ROTL(n,X)  ( ( ( X ) << n ) | ( ( X ) >> ( 32 - n ) ) )

/* One SHA round: e accumulates, b is rotated; callers rotate the
 * five register names instead of shuffling values. */
#define subRound(a, b, c, d, e, f, k, data) \
    ( e += ROTL( 5, a ) + f( b, c, d ) + k + data, b = ROTL( 30, b ) )


/*
 * One SHA-1 compression over a 16-word block.  digest[0..4] is the
 * chaining state (updated in place); digest[5..84] (the W macro) is
 * scratch for the 80-word message schedule, wiped by the caller.
 */
static void SHATransform(__u32 digest[85], __u32 const data[16])
{
	__u32 A, B, C, D, E;	/* Local vars */
	__u32 TEMP;
	int i;
#define W (digest + HASH_BUFFER_SIZE)	/* Expanded data array */

	/*
	 * Do the preliminary expansion of 16 to 80 words.  Doing it
	 * out-of-line line this is faster than doing it in-line on
	 * register-starved machines like the x86, and not really any
	 * slower on real processors.
	 */
	memcpy(W, data, 16*sizeof(__u32));
	for (i = 0; i < 64; i++) {
		TEMP = W[i] ^ W[i+2] ^ W[i+8] ^ W[i+13];
		W[i+16] = ROTL(1, TEMP);
	}

	/* Set up first buffer and local data buffer */
	A = digest[ 0 ];
	B = digest[ 1 ];
	C = digest[ 2 ];
	D = digest[ 3 ];
	E = digest[ 4 ];

	/* Heavy mangling, in 4 sub-rounds of 20 iterations each. */
#if SHA_CODE_SIZE == 0
	/*
	 * Approximately 50% of the speed of the largest version, but
	 * takes up 1/16 the space.  Saves about 6k on an i386 kernel.
	 */
	for (i = 0; i < 80; i++) {
		if (i < 40) {
			if (i < 20)
				TEMP = f1(B, C, D) + K1;
			else
				TEMP = f2(B, C, D) + K2;
		} else {
			if (i < 60)
				TEMP = f3(B, C, D) + K3;
			else
				TEMP = f4(B, C, D) + K4;
		}
		TEMP += ROTL(5, A) + E + W[i];
		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
	}
#elif SHA_CODE_SIZE == 1
	for (i = 0; i < 20; i++) {
		TEMP = f1(B, C, D) + K1 + ROTL(5, A) + E + W[i];
		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
	}
	for (; i < 40; i++) {
		TEMP = f2(B, C, D) + K2 + ROTL(5, A) + E + W[i];
		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
	}
	for (; i < 60; i++) {
		TEMP = f3(B, C, D) + K3 + ROTL(5, A) + E + W[i];
		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
	}
	for (; i < 80; i++) {
		TEMP = f4(B, C, D) + K4 + ROTL(5, A) + E + W[i];
		E = D; D = C; C = ROTL(30, B); B = A; A = TEMP;
	}
#elif SHA_CODE_SIZE == 2
	for (i = 0; i < 20; i += 5) {
		subRound( A, B, C, D, E, f1, K1, W[ i   ] );
		subRound( E, A, B, C, D, f1, K1, W[ i+1 ] );
		subRound( D, E, A, B, C, f1, K1, W[ i+2 ] );
		subRound( C, D, E, A, B, f1, K1, W[ i+3 ] );
		subRound( B, C, D, E, A, f1, K1, W[ i+4 ] );
	}
	for (; i < 40; i += 5) {
		subRound( A, B, C, D, E, f2, K2, W[ i   ] );
		subRound( E, A, B, C, D, f2, K2, W[ i+1 ] );
		subRound( D, E, A, B, C, f2, K2, W[ i+2 ] );
		subRound( C, D, E, A, B, f2, K2, W[ i+3 ] );
		subRound( B, C, D, E, A, f2, K2, W[ i+4 ] );
	}
	for (; i < 60; i += 5) {
		subRound( A, B, C, D, E, f3, K3, W[ i   ] );
		subRound( E, A, B, C, D, f3, K3, W[ i+1 ] );
		subRound( D, E, A, B, C, f3, K3, W[ i+2 ] );
		subRound( C, D, E, A, B, f3, K3, W[ i+3 ] );
		subRound( B, C, D, E, A, f3, K3, W[ i+4 ] );
	}
	for (; i < 80; i += 5) {
		subRound( A, B, C, D, E, f4, K4, W[ i   ] );
		subRound( E, A, B, C, D, f4, K4, W[ i+1 ] );
		subRound( D, E, A, B, C, f4, K4, W[ i+2 ] );
		subRound( C, D, E, A, B, f4, K4, W[ i+3 ] );
		subRound( B, C, D, E, A, f4, K4, W[ i+4 ] );
	}
#elif SHA_CODE_SIZE == 3	/* Really large version */
	subRound( A, B, C, D, E, f1, K1, W[  0 ] );
	subRound( E, A, B, C, D, f1, K1, W[  1 ] );
	subRound( D, E, A, B, C, f1, K1, W[  2 ] );
	subRound( C, D, E, A, B, f1, K1, W[  3 ] );
	subRound( B, C, D, E, A, f1, K1, W[  4 ] );
	subRound( A, B, C, D, E, f1, K1, W[  5 ] );
	subRound( E, A, B, C, D, f1, K1, W[  6 ] );
	subRound( D, E, A, B, C, f1, K1, W[  7 ] );
	subRound( C, D, E, A, B, f1, K1, W[  8 ] );
	subRound( B, C, D, E, A, f1, K1, W[  9 ] );
	subRound( A, B, C, D, E, f1, K1, W[ 10 ] );
	subRound( E, A, B, C, D, f1, K1, W[ 11 ] );
	subRound( D, E, A, B, C, f1, K1, W[ 12 ] );
	subRound( C, D, E, A, B, f1, K1, W[ 13 ] );
	subRound( B, C, D, E, A, f1, K1, W[ 14 ] );
	subRound( A, B, C, D, E, f1, K1, W[ 15 ] );
	subRound( E, A, B, C, D, f1, K1, W[ 16 ] );
	subRound( D, E, A, B, C, f1, K1, W[ 17 ] );
	subRound( C, D, E, A, B, f1, K1, W[ 18 ] );
	subRound( B, C, D, E, A, f1, K1, W[ 19 ] );

	subRound( A, B, C, D, E, f2, K2, W[ 20 ] );
	subRound( E, A, B, C, D, f2, K2, W[ 21 ] );
	subRound( D, E, A, B, C, f2, K2, W[ 22 ] );
	subRound( C, D, E, A, B, f2, K2, W[ 23 ] );
	subRound( B, C, D, E, A, f2, K2, W[ 24 ] );
	subRound( A, B, C, D, E, f2, K2, W[ 25 ] );
	subRound( E, A, B, C, D, f2, K2, W[ 26 ] );
	subRound( D, E, A, B, C, f2, K2, W[ 27 ] );
	subRound( C, D, E, A, B, f2, K2, W[ 28 ] );
	subRound( B, C, D, E, A, f2, K2, W[ 29 ] );
	subRound( A, B, C, D, E, f2, K2, W[ 30 ] );
	subRound( E, A, B, C, D, f2, K2, W[ 31 ] );
	subRound( D, E, A, B, C, f2, K2, W[ 32 ] );
	subRound( C, D, E, A, B, f2, K2, W[ 33 ] );
	subRound( B, C, D, E, A, f2, K2, W[ 34 ] );
	subRound( A, B, C, D, E, f2, K2, W[ 35 ] );
	subRound( E, A, B, C, D, f2, K2, W[ 36 ] );
	subRound( D, E, A, B, C, f2, K2, W[ 37 ] );
	subRound( C, D, E, A, B, f2, K2, W[ 38 ] );
	subRound( B, C, D, E, A, f2, K2, W[ 39 ] );

	subRound( A, B, C, D, E, f3, K3, W[ 40 ] );
	subRound( E, A, B, C, D, f3, K3, W[ 41 ] );
	subRound( D, E, A, B, C, f3, K3, W[ 42 ] );
	subRound( C, D, E, A, B, f3, K3, W[ 43 ] );
	subRound( B, C, D, E, A, f3, K3, W[ 44 ] );
	subRound( A, B, C, D, E, f3, K3, W[ 45 ] );
	subRound( E, A, B, C, D, f3, K3, W[ 46 ] );
	subRound( D, E, A, B, C, f3, K3, W[ 47 ] );
	subRound( C, D, E, A, B, f3, K3, W[ 48 ] );
	subRound( B, C, D, E, A, f3, K3, W[ 49 ] );
	subRound( A, B, C, D, E, f3, K3, W[ 50 ] );
	subRound( E, A, B, C, D, f3, K3, W[ 51 ] );
	subRound( D, E, A, B, C, f3, K3, W[ 52 ] );
	subRound( C, D, E, A, B, f3, K3, W[ 53 ] );
	subRound( B, C, D, E, A, f3, K3, W[ 54 ] );
	subRound( A, B, C, D, E, f3, K3, W[ 55 ] );
	subRound( E, A, B, C, D, f3, K3, W[ 56 ] );
	subRound( D, E, A, B, C, f3, K3, W[ 57 ] );
	subRound( C, D, E, A, B, f3, K3, W[ 58 ] );
	subRound( B, C, D, E, A, f3, K3, W[ 59 ] );

	subRound( A, B, C, D, E, f4, K4, W[ 60 ] );
	subRound( E, A, B, C, D, f4, K4, W[ 61 ] );
	subRound( D, E, A, B, C, f4, K4, W[ 62 ] );
	subRound( C, D, E, A, B, f4, K4, W[ 63 ] );
	subRound( B, C, D, E, A, f4, K4, W[ 64 ] );
	subRound( A, B, C, D, E, f4, K4, W[ 65 ] );
	subRound( E, A, B, C, D, f4, K4, W[ 66 ] );
	subRound( D, E, A, B, C, f4, K4, W[ 67 ] );
	subRound( C, D, E, A, B, f4, K4, W[ 68 ] );
	subRound( B, C, D, E, A, f4, K4, W[ 69 ] );
	subRound( A, B, C, D, E, f4, K4, W[ 70 ] );
	subRound( E, A, B, C, D, f4, K4, W[ 71 ] );
	subRound( D, E, A, B, C, f4, K4, W[ 72 ] );
	subRound( C, D, E, A, B, f4, K4, W[ 73 ] );
	subRound( B, C, D, E, A, f4, K4, W[ 74 ] );
	subRound( A, B, C, D, E, f4, K4, W[ 75 ] );
	subRound( E, A, B, C, D, f4, K4, W[ 76 ] );
	subRound( D, E, A, B, C, f4, K4, W[ 77 ] );
	subRound( C, D, E, A, B, f4, K4, W[ 78 ] );
	subRound( B, C, D, E, A, f4, K4, W[ 79 ] );
#else
#error Illegal SHA_CODE_SIZE
#endif

	/* Build message digest */
	digest[ 0 ] += A;
	digest[ 1 ] += B;
	digest[ 2 ] += C;
	digest[ 3 ] += D;
	digest[ 4 ] += E;

	/* W is wiped by the caller */
#undef W
}

#undef ROTL
#undef f1
#undef f2
#undef f3
#undef f4
#undef K1
#undef K2
#undef K3
#undef K4
#undef subRound

#else /* !USE_SHA - Use MD5 */

#define HASH_BUFFER_SIZE 4
#define HASH_EXTRA_SIZE 0
#define HASH_TRANSFORM MD5Transform

/*
 * MD5 transform algorithm, taken from code written by Colin Plumb,
 * and put into the public domain
 */

/* The four core functions - F1 is optimized somewhat */

/* #define F1(x, y, z) (x & y | ~x & z) */
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))

/* This is the central step in the MD5 algorithm.
*/ 1088#define MD5STEP(f, w, x, y, z, data, s) \ 1089 ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x ) 1090 1091/* 1092 * The core of the MD5 algorithm, this alters an existing MD5 hash to 1093 * reflect the addition of 16 longwords of new data. MD5Update blocks 1094 * the data and converts bytes into longwords for this routine. 1095 */ 1096static void MD5Transform(__u32 buf[HASH_BUFFER_SIZE], __u32 const in[16]) 1097{ 1098 __u32 a, b, c, d; 1099 1100 a = buf[0]; 1101 b = buf[1]; 1102 c = buf[2]; 1103 d = buf[3]; 1104 1105 MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478, 7); 1106 MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12); 1107 MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17); 1108 MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22); 1109 MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf, 7); 1110 MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12); 1111 MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17); 1112 MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22); 1113 MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8, 7); 1114 MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12); 1115 MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17); 1116 MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22); 1117 MD5STEP(F1, a, b, c, d, in[12]+0x6b901122, 7); 1118 MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12); 1119 MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17); 1120 MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22); 1121 1122 MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562, 5); 1123 MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340, 9); 1124 MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14); 1125 MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20); 1126 MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d, 5); 1127 MD5STEP(F2, d, a, b, c, in[10]+0x02441453, 9); 1128 MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14); 1129 MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20); 1130 MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6, 5); 1131 MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6, 9); 1132 MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14); 1133 MD5STEP(F2, b, c, d, a, in[ 
8]+0x455a14ed, 20); 1134 MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905, 5); 1135 MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8, 9); 1136 MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14); 1137 MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20); 1138 1139 MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942, 4); 1140 MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11); 1141 MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16); 1142 MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23); 1143 MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44, 4); 1144 MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11); 1145 MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16); 1146 MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23); 1147 MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6, 4); 1148 MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11); 1149 MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16); 1150 MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23); 1151 MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039, 4); 1152 MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11); 1153 MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16); 1154 MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23); 1155 1156 MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244, 6); 1157 MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10); 1158 MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15); 1159 MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21); 1160 MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3, 6); 1161 MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10); 1162 MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15); 1163 MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21); 1164 MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f, 6); 1165 MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10); 1166 MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15); 1167 MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21); 1168 MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82, 6); 1169 MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10); 1170 MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15); 1171 MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21); 1172 1173 buf[0] += a; 1174 buf[1] += b; 1175 buf[2] += c; 1176 buf[3] 
+= d; 1177} 1178 1179#undef F1 1180#undef F2 1181#undef F3 1182#undef F4 1183#undef MD5STEP 1184 1185#endif /* !USE_SHA */ 1186 1187/********************************************************************* 1188 * 1189 * Entropy extraction routines 1190 * 1191 *********************************************************************/ 1192 1193#define EXTRACT_ENTROPY_USER 1 1194#define EXTRACT_ENTROPY_SECONDARY 2 1195#define TMP_BUF_SIZE (HASH_BUFFER_SIZE + HASH_EXTRA_SIZE) 1196#define SEC_XFER_SIZE (TMP_BUF_SIZE*4) 1197 1198static ssize_t extract_entropy(struct entropy_store *r, void * buf, 1199 size_t nbytes, int flags); 1200 1201/* 1202 * This utility inline function is responsible for transfering entropy 1203 * from the primary pool to the secondary extraction pool. We pull 1204 * randomness under two conditions; one is if there isn't enough entropy 1205 * in the secondary pool. The other is after we have extracted 1024 bytes, 1206 * at which point we do a "catastrophic reseeding". 1207 */ 1208static inline void xfer_secondary_pool(struct entropy_store *r, 1209 size_t nbytes) 1210{ 1211 __u32 tmp[TMP_BUF_SIZE]; 1212 1213 if (r->entropy_count < nbytes * 8 && 1214 r->entropy_count < r->poolinfo.POOLBITS) { 1215 int nwords = min_t(int, 1216 r->poolinfo.poolwords - r->entropy_count/32, 1217 sizeof(tmp) / 4); 1218 1219 DEBUG_ENT("xfer %d from primary to %s (have %d, need %d)\n", 1220 nwords * 32, 1221 r == sec_random_state ? "secondary" : "unknown", 1222 r->entropy_count, nbytes * 8); 1223 1224 extract_entropy(random_state, tmp, nwords * 4, 0); 1225 add_entropy_words(r, tmp, nwords); 1226 credit_entropy_store(r, nwords * 32); 1227 } 1228 if (r->extract_count > 1024) { 1229 DEBUG_ENT("reseeding %s with %d from primary\n", 1230 r == sec_random_state ? 
"secondary" : "unknown", 1231 sizeof(tmp) * 8); 1232 extract_entropy(random_state, tmp, sizeof(tmp), 0); 1233 add_entropy_words(r, tmp, sizeof(tmp) / 4); 1234 r->extract_count = 0; 1235 } 1236} 1237 1238/* 1239 * This function extracts randomness from the "entropy pool", and 1240 * returns it in a buffer. This function computes how many remaining 1241 * bits of entropy are left in the pool, but it does not restrict the 1242 * number of bytes that are actually obtained. If the EXTRACT_ENTROPY_USER 1243 * flag is given, then the buf pointer is assumed to be in user space. 1244 * 1245 * If the EXTRACT_ENTROPY_SECONDARY flag is given, then we are actually 1246 * extracting entropy from the secondary pool, and can refill from the 1247 * primary pool if needed. 1248 * 1249 * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words. 1250 */ 1251static ssize_t extract_entropy(struct entropy_store *r, void * buf, 1252 size_t nbytes, int flags) 1253{ 1254 ssize_t ret, i; 1255 __u32 tmp[TMP_BUF_SIZE]; 1256 __u32 x; 1257 1258 add_timer_randomness(&extract_timer_state, nbytes); 1259 1260 /* Redundant, but just in case... */ 1261 if (r->entropy_count > r->poolinfo.POOLBITS) 1262 r->entropy_count = r->poolinfo.POOLBITS; 1263 1264 if (flags & EXTRACT_ENTROPY_SECONDARY) 1265 xfer_secondary_pool(r, nbytes); 1266 1267 DEBUG_ENT("%s has %d bits, want %d bits\n", 1268 r == sec_random_state ? "secondary" : 1269 r == random_state ? "primary" : "unknown", 1270 r->entropy_count, nbytes * 8); 1271 1272 if (r->entropy_count / 8 >= nbytes) 1273 r->entropy_count -= nbytes*8; 1274 else 1275 r->entropy_count = 0; 1276 1277 if (r->entropy_count < random_write_wakeup_thresh) 1278 wake_up_interruptible(&random_write_wait); 1279 1280 r->extract_count += nbytes; 1281 1282 ret = 0; 1283 while (nbytes) { 1284 /* 1285 * Check if we need to break out or reschedule.... 
1286 */ 1287 if ((flags & EXTRACT_ENTROPY_USER) && current->need_resched) { 1288 if (signal_pending(current)) { 1289 if (ret == 0) 1290 ret = -ERESTARTSYS; 1291 break; 1292 } 1293 schedule(); 1294 } 1295 1296 /* Hash the pool to get the output */ 1297 tmp[0] = 0x67452301; 1298 tmp[1] = 0xefcdab89; 1299 tmp[2] = 0x98badcfe; 1300 tmp[3] = 0x10325476; 1301#ifdef USE_SHA 1302 tmp[4] = 0xc3d2e1f0; 1303#endif 1304 /* 1305 * As we hash the pool, we mix intermediate values of 1306 * the hash back into the pool. This eliminates 1307 * backtracking attacks (where the attacker knows 1308 * the state of the pool plus the current outputs, and 1309 * attempts to find previous ouputs), unless the hash 1310 * function can be inverted. 1311 */ 1312 for (i = 0, x = 0; i < r->poolinfo.poolwords; i += 16, x+=2) { 1313 HASH_TRANSFORM(tmp, r->pool+i); 1314 add_entropy_words(r, &tmp[x%HASH_BUFFER_SIZE], 1); 1315 } 1316 1317 /* 1318 * In case the hash function has some recognizable 1319 * output pattern, we fold it in half. 1320 */ 1321 for (i = 0; i < HASH_BUFFER_SIZE/2; i++) 1322 tmp[i] ^= tmp[i + (HASH_BUFFER_SIZE+1)/2]; 1323#if HASH_BUFFER_SIZE & 1 /* There's a middle word to deal with */ 1324 x = tmp[HASH_BUFFER_SIZE/2]; 1325 x ^= (x >> 16); /* Fold it in half */ 1326 ((__u16 *)tmp)[HASH_BUFFER_SIZE-1] = (__u16)x; 1327#endif 1328 1329 /* Copy data to destination buffer */ 1330 i = min(nbytes, HASH_BUFFER_SIZE*sizeof(__u32)/2); 1331 if (flags & EXTRACT_ENTROPY_USER) { 1332 i -= copy_to_user(buf, (__u8 const *)tmp, i); 1333 if (!i) { 1334 ret = -EFAULT; 1335 break; 1336 } 1337 } else 1338 memcpy(buf, (__u8 const *)tmp, i); 1339 nbytes -= i; 1340 buf += i; 1341 ret += i; 1342 add_timer_randomness(&extract_timer_state, nbytes); 1343 } 1344 1345 /* Wipe data just returned from memory */ 1346 memset(tmp, 0, sizeof(tmp)); 1347 1348 return ret; 1349} 1350 1351/* 1352 * This function is the exported kernel interface. 
 It returns some
 * number of good random numbers, suitable for seeding TCP sequence
 * numbers, etc.
 */
void get_random_bytes(void *buf, int nbytes)
{
	/* Prefer the secondary pool; fall back to the primary pool if
	 * called between create_entropy_store() calls during init. */
	if (sec_random_state)
		extract_entropy(sec_random_state, (char *) buf, nbytes,
				EXTRACT_ENTROPY_SECONDARY);
	else if (random_state)
		extract_entropy(random_state, (char *) buf, nbytes, 0);
	else
		printk(KERN_NOTICE "get_random_bytes called before "
				   "random driver initialization\n");
}

/*********************************************************************
 *
 * Functions to interface with Linux
 *
 *********************************************************************/

/*
 * Initialize the random pool with standard stuff.
 *
 * NOTE: This is an OS-dependent function.
 */
static void init_std_data(struct entropy_store *r)
{
	struct timeval tv;
	__u32 words[2];
	char *p;
	int i;

	/* Mix in the boot time; unpredictable to a remote attacker
	 * but credited with no entropy (add_entropy_words only mixes). */
	do_gettimeofday(&tv);
	words[0] = tv.tv_sec;
	words[1] = tv.tv_usec;
	add_entropy_words(r, words, 2);

	/*
	 * This doesn't lock system.utsname. However, we are generating
	 * entropy so a race with a name set here is fine.
	 */
	p = (char *) &system_utsname;
	for (i = sizeof(system_utsname) / sizeof(words); i; i--) {
		memcpy(words, p, sizeof(words));
		add_entropy_words(r, words, sizeof(words)/4);
		p += sizeof(words);
	}
}

/*
 * Boot-time setup: create the primary and secondary pools, the batch
 * queue, and zero all per-source timer state.
 * NOTE(review): on a partial failure (e.g. second create failing) the
 * earlier allocations are not freed -- acceptable at boot, but worth
 * confirming if this ever becomes unloadable-module code.
 */
void __init rand_initialize(void)
{
	int i;

	if (create_entropy_store(DEFAULT_POOL_SIZE, &random_state))
		return;		/* Error, return */
	if (batch_entropy_init(BATCH_ENTROPY_SIZE, random_state))
		return;		/* Error, return */
	if (create_entropy_store(SECONDARY_POOL_SIZE, &sec_random_state))
		return;		/* Error, return */
	clear_entropy_store(random_state);
	clear_entropy_store(sec_random_state);
	init_std_data(random_state);
#ifdef CONFIG_SYSCTL
	sysctl_init_random(random_state);
#endif
	for (i = 0; i < NR_IRQS; i++)
		irq_timer_state[i] = NULL;
	for (i = 0; i < MAX_BLKDEV; i++)
		blkdev_timer_state[i] = NULL;
	memset(&keyboard_timer_state, 0, sizeof(struct timer_rand_state));
	memset(&mouse_timer_state, 0, sizeof(struct timer_rand_state));
	memset(&extract_timer_state, 0, sizeof(struct timer_rand_state));
	extract_timer_state.dont_count_entropy = 1;
}

/* Allocate timer state for an IRQ so it can contribute entropy. */
void rand_initialize_irq(int irq)
{
	struct timer_rand_state *state;

	if (irq >= NR_IRQS || irq_timer_state[irq])
		return;

	/*
	 * If kmalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		memset(state, 0, sizeof(struct timer_rand_state));
		irq_timer_state[irq] = state;
	}
}

/*
 * Allocate timer state for a block-device major so it can contribute
 * entropy.  @mode is the kmalloc flag (GFP_KERNEL or GFP_ATOMIC when
 * called from add_blkdev_randomness in interrupt context).
 */
void rand_initialize_blkdev(int major, int mode)
{
	struct timer_rand_state *state;

	if (major >= MAX_BLKDEV || blkdev_timer_state[major])
		return;

	/*
	 * If kmalloc returns null, we just won't use that entropy
	 * source.
 */
	state = kmalloc(sizeof(struct timer_rand_state), mode);
	if (state) {
		memset(state, 0, sizeof(struct timer_rand_state));
		blkdev_timer_state[major] = state;
	}
}


/*
 * /dev/random read: blocks until the primary pool's entropy estimate
 * allows at least one byte, then returns up to SEC_XFER_SIZE bytes
 * from the secondary pool.  Returns a short read like a pipe.
 */
static ssize_t
random_read(struct file * file, char * buf, size_t nbytes, loff_t *ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	ssize_t n, retval = 0, count = 0;

	if (nbytes == 0)
		return 0;

	add_wait_queue(&random_read_wait, &wait);
	while (nbytes > 0) {
		set_current_state(TASK_INTERRUPTIBLE);

		n = nbytes;
		if (n > SEC_XFER_SIZE)
			n = SEC_XFER_SIZE;
		if (n > random_state->entropy_count / 8)
			n = random_state->entropy_count / 8;
		if (n == 0) {
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			schedule();
			continue;
		}
		n = extract_entropy(sec_random_state, buf, n,
				    EXTRACT_ENTROPY_USER |
				    EXTRACT_ENTROPY_SECONDARY);
		if (n < 0) {
			retval = n;
			break;
		}
		count += n;
		buf += n;
		nbytes -= n;
		break;		/* This break makes the device work */
				/* like a named pipe */
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&random_read_wait, &wait);

	/*
	 * If we gave the user some bytes, update the access time.
	 */
	if (count != 0) {
		UPDATE_ATIME(file->f_dentry->d_inode);
	}

	return (count ? count : retval);
}

/* /dev/urandom read: never blocks; always draws from the secondary pool. */
static ssize_t
urandom_read(struct file * file, char * buf,
	     size_t nbytes, loff_t *ppos)
{
	return extract_entropy(sec_random_state, buf, nbytes,
			       EXTRACT_ENTROPY_USER |
			       EXTRACT_ENTROPY_SECONDARY);
}

/*
 * poll: readable when the pool is above the read wakeup threshold,
 * writable when it has dropped below the write wakeup threshold.
 */
static unsigned int
random_poll(struct file *file, poll_table * wait)
{
	unsigned int mask;

	poll_wait(file, &random_read_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (random_state->entropy_count >= random_read_wakeup_thresh)
		mask |= POLLIN | POLLRDNORM;
	if (random_state->entropy_count < random_write_wakeup_thresh)
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}

/*
 * write: mix user data into the primary pool WITHOUT crediting any
 * entropy (crediting requires the RNDADDTOENTCNT/RNDADDENTROPY ioctls).
 */
static ssize_t
random_write(struct file * file, const char * buffer,
	     size_t count, loff_t *ppos)
{
	int ret = 0;
	size_t bytes;
	__u32 buf[16];
	const char *p = buffer;
	size_t c = count;

	while (c > 0) {
		bytes = min(c, sizeof(buf));

		/* partial faults shrink bytes; only a total fault errors */
		bytes -= copy_from_user(&buf, p, bytes);
		if (!bytes) {
			ret = -EFAULT;
			break;
		}
		c -= bytes;
		p += bytes;

		add_entropy_words(random_state, buf, (bytes + 3) / 4);
	}
	if (p == buffer) {
		return (ssize_t)ret;
	} else {
		file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
		mark_inode_dirty(file->f_dentry->d_inode);
		return (ssize_t)(p - buffer);
	}
}

/* ioctl: entropy accounting and pool management (most need CAP_SYS_ADMIN). */
static int
random_ioctl(struct inode * inode, struct file * file,
	     unsigned int cmd, unsigned long arg)
{
	int *p, size, ent_count;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		ent_count = random_state->entropy_count;
		if (put_user(ent_count, (int *) arg))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, (int *) arg))
			return -EFAULT;
		credit_entropy_store(random_state, ent_count);
		/*
		 * Wake
 up waiting processes if we have enough
		 * entropy.
		 */
		if (random_state->entropy_count >= random_read_wakeup_thresh)
			wake_up_interruptible(&random_read_wait);
		return 0;
	case RNDGETPOOL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		p = (int *) arg;
		ent_count = random_state->entropy_count;
		/* arg layout: [0]=ent_count out, [1]=size in / poolwords out,
		 * [2...]=pool contents out */
		if (put_user(ent_count, p++) ||
		    get_user(size, p) ||
		    put_user(random_state->poolinfo.poolwords, p++))
			return -EFAULT;
		if (size < 0)
			return -EINVAL;
		if (size > random_state->poolinfo.poolwords)
			size = random_state->poolinfo.poolwords;
		if (copy_to_user(p, random_state->pool,
				 size * sizeof(__u32)))
			return -EFAULT;
		return 0;
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* arg layout: [0]=entropy credit, [1]=byte count, [2...]=data */
		p = (int *) arg;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = random_write(file, (const char *) p,
				      size, &file->f_pos);
		if (retval < 0)
			return retval;
		credit_entropy_store(random_state, ent_count);
		/*
		 * Wake up waiting processes if we have enough
		 * entropy.
		 */
		if (random_state->entropy_count >= random_read_wakeup_thresh)
			wake_up_interruptible(&random_read_wait);
		return 0;
	case RNDZAPENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		random_state->entropy_count = 0;
		return 0;
	case RNDCLEARPOOL:
		/* Clear the entropy pool and associated counters.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		clear_entropy_store(random_state);
		init_std_data(random_state);
		return 0;
	default:
		return -EINVAL;
	}
}

struct file_operations random_fops = {
	read:		random_read,
	write:		random_write,
	poll:		random_poll,
	ioctl:		random_ioctl,
};

struct file_operations urandom_fops = {
	read:		urandom_read,
	write:		random_write,
	ioctl:		random_ioctl,
};

/***************************************************************
 * Random UUID interface
 *
 * Used here for a Boot ID, but can be useful for other kernel
 * drivers.
 ***************************************************************/

/*
 * Generate random UUID (version 4, DCE variant per RFC 4122 layout).
 */
void generate_random_uuid(unsigned char uuid_out[16])
{
	get_random_bytes(uuid_out, 16);
	/* Set UUID version to 4 --- truly random generation */
	uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
	/* Set the UUID variant to DCE */
	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
}

/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_poolsize;
static int min_read_thresh, max_read_thresh;
static int min_write_thresh, max_write_thresh;
static char sysctl_bootid[16];

/*
 * This function handles a request from the user to change the pool size
 * of the primary entropy store.
 */
static int change_poolsize(int poolsize)
{
	struct entropy_store *new_store, *old_store;
	int ret;

	if ((ret = create_entropy_store(poolsize, &new_store)))
		return ret;

	/* Carry the old pool's contents and entropy credit over. */
	add_entropy_words(new_store, random_state->pool,
			  random_state->poolinfo.poolwords);
	credit_entropy_store(new_store, random_state->entropy_count);

	sysctl_init_random(new_store);
	old_store = random_state;
	random_state = batch_tqueue.data = new_store;
	free_entropy_store(old_store);
	return 0;
}

/* /proc handler for "poolsize": resize the primary pool on write. */
static int proc_do_poolsize(ctl_table *table, int write, struct file *filp,
			    void *buffer, size_t *lenp)
{
	int ret;

	sysctl_poolsize = random_state->poolinfo.POOLBYTES;

	ret = proc_dointvec(table, write, filp, buffer, lenp);
	if (ret || !write ||
	    (sysctl_poolsize == random_state->poolinfo.POOLBYTES))
		return ret;

	return change_poolsize(sysctl_poolsize);
}

/* sysctl(2) strategy routine for "poolsize" (binary interface). */
static int poolsize_strategy(ctl_table *table, int *name, int nlen,
			     void *oldval, size_t *oldlenp,
			     void *newval, size_t newlen, void **context)
{
	int len;

	sysctl_poolsize = random_state->poolinfo.POOLBYTES;

	/*
	 * We only handle the write case, since the read case gets
	 * handled by the default handler (and we don't care if the
	 * write case happens twice; it's harmless).
	 */
	if (newval && newlen) {
		len = newlen;
		if (len > table->maxlen)
			len = table->maxlen;
		if (copy_from_user(table->data, newval, len))
			return -EFAULT;
	}

	if (sysctl_poolsize != random_state->poolinfo.POOLBYTES)
		return change_poolsize(sysctl_poolsize);

	return 0;
}

/*
 * These functions are used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, it will be returned
 * as an ASCII string in the standard UUID format.  If accessed via the
 * sysctl system call, it is returned as 16 bytes of binary data.
 */
static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
			void *buffer, size_t *lenp)
{
	ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		uuid[8] = 0;
	}
	/* uuid[8] always has its top variant bit set once generated,
	 * so zero means "not yet generated". */
	if (uuid[8] == 0)
		generate_random_uuid(uuid);

	sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
		     "%02x%02x%02x%02x%02x%02x",
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15]);
	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, filp, buffer, lenp);
}

/* sysctl(2) strategy routine: returns the UUID as 16 raw bytes. */
static int uuid_strategy(ctl_table *table, int *name, int nlen,
			 void *oldval, size_t *oldlenp,
			 void *newval, size_t newlen, void **context)
{
	unsigned char tmp_uuid[16], *uuid;
	unsigned int len;

	if (!oldval || !oldlenp)
		return 1;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		uuid[8] = 0;
	}
	if (uuid[8] == 0)
		generate_random_uuid(uuid);

	if (get_user(len, oldlenp))
		return -EFAULT;
	if (len) {
		if (len > 16)
			len = 16;
		if (copy_to_user(oldval, uuid, len) ||
		    put_user(len, oldlenp))
			return -EFAULT;
	}
	return 1;
}

ctl_table random_table[] = {
	{RANDOM_POOLSIZE, "poolsize",
	 &sysctl_poolsize, sizeof(int), 0644, NULL,
	 &proc_do_poolsize, &poolsize_strategy},
	{RANDOM_ENTROPY_COUNT, "entropy_avail",
	 NULL, sizeof(int), 0444, NULL,
	 &proc_dointvec},
	{RANDOM_READ_THRESH, "read_wakeup_threshold",
	 &random_read_wakeup_thresh, sizeof(int), 0644, NULL,
	 &proc_dointvec_minmax, &sysctl_intvec, 0,
	 &min_read_thresh, &max_read_thresh},
	{RANDOM_WRITE_THRESH, "write_wakeup_threshold",
	 &random_write_wakeup_thresh, sizeof(int), 0644, NULL,
	 &proc_dointvec_minmax, &sysctl_intvec, 0,
	 &min_write_thresh, &max_write_thresh},
	{RANDOM_BOOT_ID, "boot_id",
	 &sysctl_bootid, 16, 0444, NULL,
	 &proc_do_uuid, &uuid_strategy},
	{RANDOM_UUID, "uuid",
	 NULL, 16, 0444, NULL,
	 &proc_do_uuid, &uuid_strategy},
	{0}
};

/*
 * Bind the sysctl limits and the "entropy_avail" entry to the given
 * pool; called at init and again after a pool resize.
 */
static void sysctl_init_random(struct entropy_store *random_state)
{
	min_read_thresh = 8;
	min_write_thresh = 0;
	max_read_thresh = max_write_thresh = random_state->poolinfo.POOLBITS;
	random_table[1].data = &random_state->entropy_count;
}
#endif 	/* CONFIG_SYSCTL */

/********************************************************************
 *
 * Random functions for networking
 *
 ********************************************************************/

/*
 * TCP initial sequence number picking.  This uses the random number
 * generator to pick an initial secret value.  This value is hashed
 * along with the TCP endpoint information to provide a unique
 * starting point for each pair of TCP endpoints.  This defeats
 * attacks which rely on guessing the initial TCP sequence number.
 * This algorithm was suggested by Steve Bellovin.
 *
 * Using a very strong hash was taking an appreciable amount of the total
 * TCP connection establishment time, so this is a weaker hash,
 * compensated for by changing the secret periodically.
 */

/* F, G and H are basic MD4 functions: selection, majority, parity */
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))

/*
 * The generic round function.  The application is so specific that
 * we don't bother protecting all the arguments with parens, as is generally
 * good macro practice, in favor of extra legibility.
 * Rotation is separate from addition to prevent recomputation
 */
#define ROUND(f, a, b, c, d, x, s)	\
	(a += f(b, c, d) + x, a = (a << s) | (a >> (32-s)))
#define K1 0
#define K2 013240474631UL
#define K3 015666365641UL

/*
 * Basic cut-down MD4 transform.  Returns only 32 bits of result.
 */
static __u32 halfMD4Transform (__u32 const buf[4], __u32 const in[8])
{
	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];

	/* Round 1 */
	ROUND(F, a, b, c, d, in[0] + K1,  3);
	ROUND(F, d, a, b, c, in[1] + K1,  7);
	ROUND(F, c, d, a, b, in[2] + K1, 11);
	ROUND(F, b, c, d, a, in[3] + K1, 19);
	ROUND(F, a, b, c, d, in[4] + K1,  3);
	ROUND(F, d, a, b, c, in[5] + K1,  7);
	ROUND(F, c, d, a, b, in[6] + K1, 11);
	ROUND(F, b, c, d, a, in[7] + K1, 19);

	/* Round 2 */
	ROUND(G, a, b, c, d, in[1] + K2,  3);
	ROUND(G, d, a, b, c, in[3] + K2,  5);
	ROUND(G, c, d, a, b, in[5] + K2,  9);
	ROUND(G, b, c, d, a, in[7] + K2, 13);
	ROUND(G, a, b, c, d, in[0] + K2,  3);
	ROUND(G, d, a, b, c, in[2] + K2,  5);
	ROUND(G, c, d, a, b, in[4] + K2,  9);
	ROUND(G, b, c, d, a, in[6] + K2, 13);

	/* Round 3 */
	ROUND(H, a, b, c, d, in[3] + K3,  3);
	ROUND(H, d, a, b, c, in[7] + K3,  9);
	ROUND(H, c, d, a, b, in[2] + K3, 11);
	ROUND(H, b, c, d, a, in[6] + K3, 15);
	ROUND(H, a, b, c, d, in[1] + K3,  3);
	ROUND(H, d, a, b, c, in[5] + K3,  9);
	ROUND(H, c, d, a, b, in[0] + K3, 11);
	ROUND(H, b, c, d, a, in[4] + K3, 15);

	return buf[1] + b;	/* "most hashed" word */
	/* Alternative: return sum of all words?
*/ 1944} 1945 1946#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1947 1948static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12]) 1949{ 1950 __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; 1951 1952 /* Round 1 */ 1953 ROUND(F, a, b, c, d, in[ 0] + K1, 3); 1954 ROUND(F, d, a, b, c, in[ 1] + K1, 7); 1955 ROUND(F, c, d, a, b, in[ 2] + K1, 11); 1956 ROUND(F, b, c, d, a, in[ 3] + K1, 19); 1957 ROUND(F, a, b, c, d, in[ 4] + K1, 3); 1958 ROUND(F, d, a, b, c, in[ 5] + K1, 7); 1959 ROUND(F, c, d, a, b, in[ 6] + K1, 11); 1960 ROUND(F, b, c, d, a, in[ 7] + K1, 19); 1961 ROUND(F, a, b, c, d, in[ 8] + K1, 3); 1962 ROUND(F, d, a, b, c, in[ 9] + K1, 7); 1963 ROUND(F, c, d, a, b, in[10] + K1, 11); 1964 ROUND(F, b, c, d, a, in[11] + K1, 19); 1965 1966 /* Round 2 */ 1967 ROUND(G, a, b, c, d, in[ 1] + K2, 3); 1968 ROUND(G, d, a, b, c, in[ 3] + K2, 5); 1969 ROUND(G, c, d, a, b, in[ 5] + K2, 9); 1970 ROUND(G, b, c, d, a, in[ 7] + K2, 13); 1971 ROUND(G, a, b, c, d, in[ 9] + K2, 3); 1972 ROUND(G, d, a, b, c, in[11] + K2, 5); 1973 ROUND(G, c, d, a, b, in[ 0] + K2, 9); 1974 ROUND(G, b, c, d, a, in[ 2] + K2, 13); 1975 ROUND(G, a, b, c, d, in[ 4] + K2, 3); 1976 ROUND(G, d, a, b, c, in[ 6] + K2, 5); 1977 ROUND(G, c, d, a, b, in[ 8] + K2, 9); 1978 ROUND(G, b, c, d, a, in[10] + K2, 13); 1979 1980 /* Round 3 */ 1981 ROUND(H, a, b, c, d, in[ 3] + K3, 3); 1982 ROUND(H, d, a, b, c, in[ 7] + K3, 9); 1983 ROUND(H, c, d, a, b, in[11] + K3, 11); 1984 ROUND(H, b, c, d, a, in[ 2] + K3, 15); 1985 ROUND(H, a, b, c, d, in[ 6] + K3, 3); 1986 ROUND(H, d, a, b, c, in[10] + K3, 9); 1987 ROUND(H, c, d, a, b, in[ 1] + K3, 11); 1988 ROUND(H, b, c, d, a, in[ 5] + K3, 15); 1989 ROUND(H, a, b, c, d, in[ 9] + K3, 3); 1990 ROUND(H, d, a, b, c, in[ 0] + K3, 9); 1991 ROUND(H, c, d, a, b, in[ 4] + K3, 11); 1992 ROUND(H, b, c, d, a, in[ 8] + K3, 15); 1993 1994 return buf[1] + b; /* "most hashed" word */ 1995 /* Alternative: return sum of all words? 
*/ 1996} 1997#endif 1998 1999#undef ROUND 2000#undef F 2001#undef G 2002#undef H 2003#undef K1 2004#undef K2 2005#undef K3 2006 2007/* This should not be decreased so low that ISNs wrap too fast. */ 2008#define REKEY_INTERVAL 300 2009#define HASH_BITS 24 2010 2011#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 2012__u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr, 2013 __u16 sport, __u16 dport) 2014{ 2015 static __u32 rekey_time; 2016 static __u32 count; 2017 static __u32 secret[12]; 2018 struct timeval tv; 2019 __u32 seq; 2020 2021 /* The procedure is the same as for IPv4, but addresses are longer. */ 2022 2023 do_gettimeofday(&tv); /* We need the usecs below... */ 2024 2025 if (!rekey_time || (tv.tv_sec - rekey_time) > REKEY_INTERVAL) { 2026 rekey_time = tv.tv_sec; 2027 /* First five words are overwritten below. */ 2028 get_random_bytes(&secret[5], sizeof(secret)-5*4); 2029 count = (tv.tv_sec/REKEY_INTERVAL) << HASH_BITS; 2030 } 2031 2032 memcpy(secret, saddr, 16); 2033 secret[4]=(sport << 16) + dport; 2034 2035 seq = (twothirdsMD4Transform(daddr, secret) & 2036 ((1<<HASH_BITS)-1)) + count; 2037 2038 seq += tv.tv_usec + tv.tv_sec*1000000; 2039 return seq; 2040} 2041 2042__u32 secure_ipv6_id(__u32 *daddr) 2043{ 2044 static time_t rekey_time; 2045 static __u32 secret[12]; 2046 time_t t; 2047 2048 /* 2049 * Pick a random secret every REKEY_INTERVAL seconds. 2050 */ 2051 t = CURRENT_TIME; 2052 if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) { 2053 rekey_time = t; 2054 /* First word is overwritten below. */ 2055 get_random_bytes(secret, sizeof(secret)); 2056 } 2057 2058 return twothirdsMD4Transform(daddr, secret); 2059} 2060 2061#endif 2062 2063 2064__u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr, 2065 __u16 sport, __u16 dport) 2066{ 2067 static __u32 rekey_time; 2068 static __u32 count; 2069 static __u32 secret[12]; 2070 struct timeval tv; 2071 __u32 seq; 2072 2073 /* 2074 * Pick a random secret every REKEY_INTERVAL seconds. 
2075 */ 2076 do_gettimeofday(&tv); /* We need the usecs below... */ 2077 2078 if (!rekey_time || (tv.tv_sec - rekey_time) > REKEY_INTERVAL) { 2079 rekey_time = tv.tv_sec; 2080 /* First three words are overwritten below. */ 2081 get_random_bytes(&secret[3], sizeof(secret)-12); 2082 count = (tv.tv_sec/REKEY_INTERVAL) << HASH_BITS; 2083 } 2084 2085 /* 2086 * Pick a unique starting offset for each TCP connection endpoints 2087 * (saddr, daddr, sport, dport). 2088 * Note that the words are placed into the first words to be 2089 * mixed in with the halfMD4. This is because the starting 2090 * vector is also a random secret (at secret+8), and further 2091 * hashing fixed data into it isn't going to improve anything, 2092 * so we should get started with the variable data. 2093 */ 2094 secret[0]=saddr; 2095 secret[1]=daddr; 2096 secret[2]=(sport << 16) + dport; 2097 2098 seq = (halfMD4Transform(secret+8, secret) & 2099 ((1<<HASH_BITS)-1)) + count; 2100 2101 /* 2102 * As close as possible to RFC 793, which 2103 * suggests using a 250 kHz clock. 2104 * Further reading shows this assumes 2 Mb/s networks. 2105 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. 2106 * That's funny, Linux has one built in! Use it! 2107 * (Networks are faster now - should this be increased?) 2108 */ 2109 seq += tv.tv_usec + tv.tv_sec*1000000; 2110 return seq; 2111} 2112 2113/* The code below is shamelessly stolen from secure_tcp_sequence_number(). 2114 * All blames to Andrey V. Savochkin <saw@msu.ru>. 2115 */ 2116__u32 secure_ip_id(__u32 daddr) 2117{ 2118 static time_t rekey_time; 2119 static __u32 secret[12]; 2120 time_t t; 2121 2122 /* 2123 * Pick a random secret every REKEY_INTERVAL seconds. 2124 */ 2125 t = CURRENT_TIME; 2126 if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) { 2127 rekey_time = t; 2128 /* First word is overwritten below. */ 2129 get_random_bytes(secret+1, sizeof(secret)-4); 2130 } 2131 2132 /* 2133 * Pick a unique starting offset for each IP destination. 
2134 * Note that the words are placed into the first words to be 2135 * mixed in with the halfMD4. This is because the starting 2136 * vector is also a random secret (at secret+8), and further 2137 * hashing fixed data into it isn't going to improve anything, 2138 * so we should get started with the variable data. 2139 */ 2140 secret[0]=daddr; 2141 2142 return halfMD4Transform(secret+8, secret); 2143} 2144 2145#ifdef CONFIG_SYN_COOKIES 2146/* 2147 * Secure SYN cookie computation. This is the algorithm worked out by 2148 * Dan Bernstein and Eric Schenk. 2149 * 2150 * For linux I implement the 1 minute counter by looking at the jiffies clock. 2151 * The count is passed in as a parameter, so this code doesn't much care. 2152 */ 2153 2154#define COOKIEBITS 24 /* Upper bits store count */ 2155#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) 2156 2157static int syncookie_init; 2158static __u32 syncookie_secret[2][16-3+HASH_BUFFER_SIZE]; 2159 2160__u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport, 2161 __u16 dport, __u32 sseq, __u32 count, __u32 data) 2162{ 2163 __u32 tmp[16 + HASH_BUFFER_SIZE + HASH_EXTRA_SIZE]; 2164 __u32 seq; 2165 2166 /* 2167 * Pick two random secrets the first time we need a cookie. 2168 */ 2169 if (syncookie_init == 0) { 2170 get_random_bytes(syncookie_secret, sizeof(syncookie_secret)); 2171 syncookie_init = 1; 2172 } 2173 2174 /* 2175 * Compute the secure sequence number. 2176 * The output should be: 2177 * HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24) 2178 * + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24). 2179 * Where sseq is their sequence number and count increases every 2180 * minute by 1. 2181 * As an extra hack, we add a small "data" value that encodes the 2182 * MSS into the second hash value. 
2183 */ 2184 2185 memcpy(tmp+3, syncookie_secret[0], sizeof(syncookie_secret[0])); 2186 tmp[0]=saddr; 2187 tmp[1]=daddr; 2188 tmp[2]=(sport << 16) + dport; 2189 HASH_TRANSFORM(tmp+16, tmp); 2190 seq = tmp[17] + sseq + (count << COOKIEBITS); 2191 2192 memcpy(tmp+3, syncookie_secret[1], sizeof(syncookie_secret[1])); 2193 tmp[0]=saddr; 2194 tmp[1]=daddr; 2195 tmp[2]=(sport << 16) + dport; 2196 tmp[3] = count; /* minute counter */ 2197 HASH_TRANSFORM(tmp+16, tmp); 2198 2199 /* Add in the second hash and the data */ 2200 return seq + ((tmp[17] + data) & COOKIEMASK); 2201} 2202 2203/* 2204 * This retrieves the small "data" value from the syncookie. 2205 * If the syncookie is bad, the data returned will be out of 2206 * range. This must be checked by the caller. 2207 * 2208 * The count value used to generate the cookie must be within 2209 * "maxdiff" if the current (passed-in) "count". The return value 2210 * is (__u32)-1 if this test fails. 2211 */ 2212__u32 check_tcp_syn_cookie(__u32 cookie, __u32 saddr, __u32 daddr, __u16 sport, 2213 __u16 dport, __u32 sseq, __u32 count, __u32 maxdiff) 2214{ 2215 __u32 tmp[16 + HASH_BUFFER_SIZE + HASH_EXTRA_SIZE]; 2216 __u32 diff; 2217 2218 if (syncookie_init == 0) 2219 return (__u32)-1; /* Well, duh! 
*/ 2220 2221 /* Strip away the layers from the cookie */ 2222 memcpy(tmp+3, syncookie_secret[0], sizeof(syncookie_secret[0])); 2223 tmp[0]=saddr; 2224 tmp[1]=daddr; 2225 tmp[2]=(sport << 16) + dport; 2226 HASH_TRANSFORM(tmp+16, tmp); 2227 cookie -= tmp[17] + sseq; 2228 /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */ 2229 2230 diff = (count - (cookie >> COOKIEBITS)) & ((__u32)-1 >> COOKIEBITS); 2231 if (diff >= maxdiff) 2232 return (__u32)-1; 2233 2234 memcpy(tmp+3, syncookie_secret[1], sizeof(syncookie_secret[1])); 2235 tmp[0] = saddr; 2236 tmp[1] = daddr; 2237 tmp[2] = (sport << 16) + dport; 2238 tmp[3] = count - diff; /* minute counter */ 2239 HASH_TRANSFORM(tmp+16, tmp); 2240 2241 return (cookie - tmp[17]) & COOKIEMASK; /* Leaving the data behind */ 2242} 2243#endif 2244 2245 2246 2247EXPORT_SYMBOL(add_keyboard_randomness); 2248EXPORT_SYMBOL(add_mouse_randomness); 2249EXPORT_SYMBOL(add_interrupt_randomness); 2250EXPORT_SYMBOL(add_blkdev_randomness); 2251EXPORT_SYMBOL(batch_entropy_store); 2252EXPORT_SYMBOL(generate_random_uuid); 2253 2254