/* crypto.c -- FreeBSD sys/opencrypto, revision 158702 */
/*	$OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $	*/
/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/opencrypto/crypto.c 158702 2006-05-17 18:12:44Z pjd $");

#define	CRYPTO_TIMING				/* enable timing support */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/uma.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>			/* XXX for M_XDATA */

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
static	struct cryptocap *crypto_drivers = NULL;	/* registered drivers, indexed by hid */
static	int crypto_drivers_num = 0;		/* current size of crypto_drivers[] */

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static	TAILQ_HEAD(,cryptop) crp_q;		/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)
#define	CRYPTO_Q_EMPTY()	(TAILQ_EMPTY(&crp_q) && TAILQ_EMPTY(&crp_kq))

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
 * mutex is used to lock access to both queues.  Note that this lock
 * must be separate from the lock on request queues to insure driver
 * callbacks don't generate lock order reversals.
 */
static	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queues */
static	TAILQ_HEAD(,cryptkop) crp_ret_kq;
static	struct mtx crypto_ret_q_mtx;
#define	CRYPTO_RETQ_LOCK()	mtx_lock(&crypto_ret_q_mtx)
#define	CRYPTO_RETQ_UNLOCK()	mtx_unlock(&crypto_ret_q_mtx)

/* UMA zones backing cryptop/cryptodesc allocation (see crypto_getreq). */
static	uma_zone_t cryptop_zone;
static	uma_zone_t cryptodesc_zone;

int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto for asym */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software asym crypto support");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(void);
static	struct proc *cryptoretproc;
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp);

static	struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif

/*
 * One-time subsystem setup: locks, queues, UMA zones, the initial
 * driver table, and the two worker threads.  On any failure all
 * partially-created state is torn down via crypto_destroy().
 * Returns 0 or an errno (ENOMEM, or a kthread_create error).
 */
static int
crypto_init(void)
{
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
		MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);
	mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		error = ENOMEM;
		goto bad;
	}

	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot setup crypto drivers\n");
		error = ENOMEM;
		goto bad;
	}

	error = kthread_create((void (*)(void *)) crypto_proc, NULL,
		    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
			error);
		goto bad;
	}

	error = kthread_create((void (*)(void *)) crypto_ret_proc, NULL,
		    &cryptoretproc, 0, 0, "crypto returns");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
			error);
		goto bad;
	}
	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	/* Clear the thread pointer first; the thread tests it to exit. */
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: insure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}

/*
 * Tear down everything crypto_init() created: stop the worker
 * threads, then release the driver table, zones, and locks.
 */
static void
crypto_destroy(void)
{
	/*
	 * Terminate any crypto threads.
	 */
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	crypto_terminate(&cryptoretproc, &crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptodesc_zone != NULL)
		uma_zdestroy(cryptodesc_zone);
	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	mtx_destroy(&crypto_ret_q_mtx);
	mtx_destroy(&crypto_drivers_mtx);
}

/*
 * Initialization code, both for static and dynamic loading.
 */
static int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		return 0;
	}
	return error;
}

static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};
MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		struct cryptocap *cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (cap->cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session
			 * XXX layer right about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = (*cap->cc_newsession)(cap->cc_arg, &lid, cri);
			if (err == 0) {
				/*
				 * Pack the session id: driver flags in the
				 * top byte, driver id (hid) in bits 24..55,
				 * driver-local session id in the low 32 bits.
				 */
				/* XXX assert (hid &~ 0xffffff) == 0 */
				/* XXX assert (cap->cc_flags &~ 0xff) == 0 */
				(*sid) = ((cap->cc_flags & 0xff) << 24) | hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				cap->cc_sessions++;
			}
			break;
		}
	}
done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/* Release a driver table slot once nothing references it any longer. */
static void
crypto_remove(struct cryptocap *cap)
{

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
		bzero(cap, sizeof(*cap));
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int err;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = CRYPTO_SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}
	cap = &crypto_drivers[hid];

	if (cap->cc_sessions)
		cap->cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		err = cap->cc_freesession(cap->cc_arg, sid);
	else
		err = 0;

	/* Driver unregistered while sessions were live; maybe reclaim slot. */
	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		crypto_remove(cap);

done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i;

	CRYPTO_DRIVER_LOCK();

	for (i = 0; i < crypto_drivers_num; i++) {
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
			break;
		}
	}

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;
	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}

/*
 * Map a driver id to its capability slot; NULL if out of range or the
 * table has not been allocated.  Callers hold the appropriate lock.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: CRK_ALGORITM_MIN is the (historically misspelled) header macro. */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Unregister a crypto driver.
 * If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	struct cryptocap *cap;
	u_int32_t ses, kops;
	int i, err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/* Preserve session/operation counts across the wipe. */
			ses = cap->cc_sessions;
			kops = cap->cc_koperations;
			bzero(cap, sizeof(*cap));
			if (ses != 0 || kops != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
				cap->cc_koperations = kops;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	struct cryptocap *cap;
	u_int32_t ses, kops;
	int i, err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		/* Preserve session/operation counts across the wipe. */
		ses = cap->cc_sessions;
		kops = cap->cc_koperations;
		bzero(cap, sizeof(*cap));
		if (ses != 0 || kops != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
			cap->cc_koperations = kops;
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup, err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		needwakeup = 0;
		if (what & CRYPTO_SYMQ) {
			needwakeup |= cap->cc_qblocked;
			cap->cc_qblocked = 0;
		}
		if (what & CRYPTO_ASYMQ) {
			needwakeup |= cap->cc_kqblocked;
			cap->cc_kqblocked = 0;
		}
		/* Kick the dispatch thread only if it was actually blocked. */
		if (needwakeup)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}

/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int result;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		binuptime(&crp->crp_tstamp);
#endif

	hid = CRYPTO_SESID2HID(crp->crp_sid);

	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		/* Driver cannot disappeared when there is an active session. */
		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
		if (!cap->cc_qblocked) {
			result = crypto_invoke(cap, crp, 0);
			if (result != ERESTART)
				return (result);
			else {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request on the queue.
				 *
				 * XXX ops are placed at the tail so their
				 * order is preserved but this can place them
				 * behind batch'd ops.
				 */
				cap->cc_qblocked = 1;
				cryptostats.cs_blocks++;
			}
		}
	}
	CRYPTO_Q_LOCK();
	/* Wake the dispatch thread only on an empty->non-empty transition. */
	if (CRYPTO_Q_EMPTY())
		wakeup_one(&crp_q);
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	CRYPTO_Q_UNLOCK();
	return 0;
}

/*
 * Add an asymetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int result;

	cryptostats.cs_kops++;

	/* Try direct invocation first; queue only if the driver pushed back. */
	result = crypto_kinvoke(krp);
	if (result != ERESTART)
		return (result);
	CRYPTO_Q_LOCK();
	if (CRYPTO_Q_EMPTY())
		wakeup_one(&crp_q);
	TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
	CRYPTO_Q_UNLOCK();

	return 0;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
745 */ 746static int 747crypto_kinvoke(struct cryptkop *krp) 748{ 749 struct cryptocap *cap = NULL; 750 u_int32_t hid; 751 int error = 0; 752 753 KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); 754 KASSERT(krp->krp_callback != NULL, 755 ("%s: krp->crp_callback == NULL", __func__)); 756 757 CRYPTO_DRIVER_LOCK(); 758 for (hid = 0; hid < crypto_drivers_num; hid++) { 759 cap = &crypto_drivers[hid]; 760 if (cap == NULL) 761 continue; 762 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 763 !crypto_devallowsoft) { 764 continue; 765 } 766 if (cap->cc_kprocess == NULL) 767 continue; 768 if (!(cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED)) 769 continue; 770 if (cap->cc_kqblocked) { 771 error = ERESTART; 772 continue; 773 } 774 error = 0; 775 break; 776 } 777 krp->krp_hid = hid; 778 if (hid < crypto_drivers_num) { 779 cap->cc_koperations++; 780 CRYPTO_DRIVER_UNLOCK(); 781 error = cap->cc_kprocess(cap->cc_karg, krp, 0); 782 CRYPTO_DRIVER_LOCK(); 783 if (error == ERESTART) { 784 cap->cc_koperations--; 785 cap->cc_kqblocked = 1; 786 CRYPTO_DRIVER_UNLOCK(); 787 cryptostats.cs_kblocks++; 788 return (error); 789 } 790 } else { 791 error = ENODEV; 792 } 793 CRYPTO_DRIVER_UNLOCK(); 794 795 if (error) { 796 krp->krp_status = error; 797 crypto_kdone(krp); 798 } 799 return 0; 800} 801 802#ifdef CRYPTO_TIMING 803static void 804crypto_tstat(struct cryptotstat *ts, struct bintime *bt) 805{ 806 struct bintime now, delta; 807 struct timespec t; 808 uint64_t u; 809 810 binuptime(&now); 811 u = now.frac; 812 delta.frac = now.frac - bt->frac; 813 delta.sec = now.sec - bt->sec; 814 if (u < delta.frac) 815 delta.sec--; 816 bintime2timespec(&delta, &t); 817 timespecadd(&ts->acc, &t); 818 if (timespeccmp(&t, &ts->min, <)) 819 ts->min = t; 820 if (timespeccmp(&t, &ts->max, >)) 821 ts->max = t; 822 ts->count++; 823 824 *bt = now; 825} 826#endif 827 828/* 829 * Dispatch a crypto request to the appropriate crypto devices. 
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 * session?
		 */
		crypto_freesession(crp->crp_sid);

		/* Chain the per-descriptor cryptoini records for newsession. */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;	/* tell caller to resubmit */
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return cap->cc_process(cap->cc_arg, crp, hint);
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;

	/* Free each descriptor in the chain, then the request itself. */
	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		uma_zfree(cryptodesc_zone, crd);
	}

	uma_zfree(cryptop_zone, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;

	crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
	if (crp != NULL) {
		while (num--) {
			crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
			if (crd == NULL) {
				/* Partial failure: release what we built. */
				crypto_freereq(crp);
				return NULL;
			}

			crd->crd_next = crp->crp_desc;
			crp->crp_desc = crd;
		}
	}
	return crp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct bintime t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETQ_LOCK();
		if (TAILQ_EMPTY(&crp_ret_q))
			wakeup_one(&crp_ret_q);	/* shared wait channel */
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		CRYPTO_RETQ_UNLOCK();
	}
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	struct cryptocap *cap;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	CRYPTO_DRIVER_LOCK();
	/* XXX: What if driver is loaded in the meantime? */
	if (krp->krp_hid < crypto_drivers_num) {
		cap = &crypto_drivers[krp->krp_hid];
		cap->cc_koperations--;
		KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
		if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_remove(cap);
	}
	CRYPTO_DRIVER_UNLOCK();
	CRYPTO_RETQ_LOCK();
	if (TAILQ_EMPTY(&crp_ret_kq))
		wakeup_one(&crp_ret_q);		/* shared wait channel */
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	CRYPTO_RETQ_UNLOCK();
}

/*
 * Report which key algorithms are available to userland as a bitmask
 * in *featp; always returns 0.
 */
int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;

	if (!crypto_userasymcrypto)
		goto out;

	CRYPTO_DRIVER_LOCK();
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft) {
			continue;
		}
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		/*
		 * NOTE(review): the `<` bound skips kalg == CRK_ALGORITHM_MAX
		 * while registration accepts it (kregister uses `<=`) --
		 * looks like an off-by-one; confirm against cryptodev.h
		 * before changing.
		 */
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((crypto_drivers[hid].cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;
	}
	CRYPTO_DRIVER_UNLOCK();
out:
	*featp = feat;
	return (0);
}

/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to insure
 * we don't do the wakeup before they're waiting.
 * There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	kthread_exit(0);
}

/*
 * Crypto thread, dispatches crypto requests.
 */
static void
crypto_proc(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	u_int32_t hid;
	int result, hint;

	CRYPTO_Q_LOCK();
	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			/*
			 * Driver cannot disappeared when there is an active
			 * session.
			 */
			KASSERT(cap != NULL, ("%s: Driver disappeared.",
			    __func__));
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless whether its for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning for more are q'd */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			/* Invoke the driver without holding the queue lock. */
			CRYPTO_Q_UNLOCK();
			hid = CRYPTO_SESID2HID(submit->crp_sid);
			cap = crypto_checkdriver(hid);
			result = crypto_invoke(cap, submit, hint);
			CRYPTO_Q_LOCK();
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			CRYPTO_Q_UNLOCK();
			result = crypto_kinvoke(krp);
			CRYPTO_Q_LOCK();
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}

		if (submit == NULL && krp == NULL) {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front. It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
			/* NULL'd by crypto_terminate: time to shut down. */
			if (cryptoproc == NULL)
				break;
			cryptostats.cs_intrs++;
		}
	}
	CRYPTO_Q_UNLOCK();

	crypto_finis(&crp_q);
}

/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_proc(void)
{
	struct cryptop *crpt;
	struct cryptkop *krpt;

	CRYPTO_RETQ_LOCK();
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&crp_ret_q);
		if (crpt != NULL)
			TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);

		krpt = TAILQ_FIRST(&crp_ret_kq);
		if (krpt != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);

		if (crpt != NULL || krpt != NULL) {
			CRYPTO_RETQ_UNLOCK();
			/*
			 * Run callbacks unlocked.
			 */
			if (crpt != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct bintime t = crpt->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crpt->crp_callback(crpt);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crpt->crp_callback(crpt);
			}
			if (krpt != NULL)
				krpt->krp_callback(krpt);
			CRYPTO_RETQ_LOCK();
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
				"crypto_ret_wait", 0);
			/* NULL'd by crypto_terminate: time to shut down. */
			if (cryptoretproc == NULL)
				break;
			cryptostats.cs_rets++;
		}
	}
	CRYPTO_RETQ_UNLOCK();

	crypto_finis(&crp_ret_q);
}