/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;

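/*
 * Constant-fill pads used by swcr_authprepare() to extend HMAC keys that
 * are shorter than the hash block size; they are filled with
 * HMAC_IPAD_VAL/HMAC_OPAD_VAL in swcr_attach().
 */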
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
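/*
 * NB: transforms that provide a reinit method do their own IV handling
 * and are simply fed one block at a time.  For the remaining (CBC-style)
 * transforms the chaining is done here:
 *
 *	encrypt:  C[i] = E_k(P[i] ^ C[i-1]),  with C[-1] = IV
 *	decrypt:  P[i] = D_k(C[i]) ^ C[i-1]
 *
 * The iv[]/piv[] scratch buffers below carry C[i-1] from one block to
 * the next.
 */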
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			arc4rand(iv, blks, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
				crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back the en/decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only when there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back the en/decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only when there is indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}

		return 0; /* Done with iovec encryption/decryption */
	} else {	/* contiguous buffer */
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT)
					exf->encrypt(sw->sw_kschedule, buf + i);
				else
					exf->decrypt(sw->sw_kschedule, buf + i);
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0; /* Done with contiguous buffer encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

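/*
 * Prepare the authentication state for a session key.  For the HMAC
 * algorithms this precomputes the inner and outer contexts of
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)) into sw_ictx and
 * sw_octx; for the KPDK algorithms it hashes the key into sw_ictx and
 * saves a copy of the key in sw_octx (see swcr_authcompute()).  The
 * caller's key is left unmodified on return.
 */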
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an MD5 or SHA1 result,
		 * only to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse sw_octx to save the key, so that we
		 * can append it to the data in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
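/*
 * The precomputed inner context (sw_ictx) is copied into a local context,
 * the payload is run through it via crypto_apply(), and then the outer
 * HMAC pass (or the trailing key for KPDK) finishes the digest.  The
 * result is written into the buffer at crd_inject, truncated to sw_mlen
 * when that is nonzero.
 */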
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

/*
 * Apply a compression/decompression algorithm
 */
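/*
 * The (de)compression transforms operate on a single contiguous buffer,
 * so the data is first gathered into a temporary buffer, run through
 * cxf->compress()/decompress() (which allocate the output), and the
 * result is copied back over the request buffer; if the output is
 * shorter than the input, the mbuf chain or uio is trimmed accordingly.
 */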
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/* We must handle the whole buffer of data at once; if the data is
	 * not already contiguous (e.g. it lives in an mbuf chain), copy it
	 * into a temporary buffer first.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/* Copy back the (de)compressed data.  m_copyback will extend the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
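/*
 * Sessions live in the swcr_sessions array, indexed by the returned sid;
 * entry 0 is reserved and the array is doubled on demand.  Each cryptoini
 * in the chain gets its own swcr_data, linked through sw_next, so a single
 * session can combine e.g. a cipher and an authenticator.
 */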
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(dev, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession(dev, i);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(dev, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
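/*
 * Any key material held by the session (cipher key schedules, HMAC
 * contexts, the saved KPDK key) is zeroed before the memory is freed.
 */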
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
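/*
 * Walk the chain of crypto descriptors in the request, match each one to
 * the session's swcr_data by algorithm, dispatch it to the appropriate
 * handler (swcr_encdec, swcr_authcompute or swcr_compdec), record any
 * error in crp_etype and finally hand the request back via crypto_done().
 */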
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

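/*
 * Fill in the HMAC pad buffers and register every algorithm this driver
 * implements with the opencrypto framework; swcr_id identifies the driver
 * for subsequent session setup and request dispatch.
 */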
static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		free(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);