/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/

/*-
 * Copyright (c) 1999 Theo de Raadt
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/sdt.h>

#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>

SDT_PROVIDER_DECLARE(opencrypto);

/*
 * These macros are only for avoiding code duplication, as we need to skip
 * a given number of bytes in the same way in several functions below.
 */
#define	CUIO_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
		if (off < iov->iov_len)					\
			break;						\
		off -= iov->iov_len;					\
		iol--;							\
		iov++;							\
	}								\
} while (0)

#define CVM_PAGE_SKIP()	do {					\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		if (off < PAGE_SIZE)					\
			break;						\
		processed += PAGE_SIZE - off;				\
		off -= PAGE_SIZE - off;					\
		pages++;						\
	}								\
} while (0)

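/*
 * Copy len bytes of data from the iovec list of the uio into cp, skipping
 * the first off bytes.
 */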
static void
cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(((caddr_t)iov->iov_base) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

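/*
 * Copy len bytes of data from cp into the iovec list of the uio, skipping
 * the first off bytes.
 */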
static void
cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

/*
 * Return the index and offset of location in iovec list.
 */
static int
cuio_getptr(struct uio *uio, int loc, int *off)
{
	int ind, len;

	ind = 0;
	while (loc >= 0 && ind < uio->uio_iovcnt) {
		len = uio->uio_iov[ind].iov_len;
		if (len > loc) {
			*off = loc;
			return (ind);
		}
		loc -= len;
		ind++;
	}

	if (ind > 0 && loc == 0) {
		ind--;
		*off = uio->uio_iov[ind].iov_len;
		return (ind);
	}

	return (-1);
}

#if CRYPTO_MAY_HAVE_VMPAGE
/*
 * Apply function f to the data in a vm_page_t list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cvm_page_apply(vm_page_t *pages, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int processed __unused;
	unsigned count;
	int rval;

	processed = 0;
	CVM_PAGE_SKIP();
	while (len > 0) {
		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
		count = min(PAGE_SIZE - off, len);
		rval = (*f)(arg, kaddr + off, count);
		if (rval)
			return (rval);
		len -= count;
		processed += count;
		off = 0;
		pages++;
	}
	return (0);
}

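/*
 * Return a pointer (via the direct map) to a contiguous segment of len
 * bytes starting skip bytes into the vm_page_t array, or NULL if the
 * requested range crosses a page boundary.
 */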
static inline void *
cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
{
	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
		return (NULL);

	pages += (skip / PAGE_SIZE);
	skip -= rounddown(skip, PAGE_SIZE);
	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
}

/*
 * Copy len bytes of data from the pointer cp into the vm_page_t array,
 * skipping the first off bytes.  Return the number of bytes skipped and
 * copied.  Does not verify the length of the array.
 */
static int
cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}

/*
 * Copy len bytes of data from the vm_page_t array, skipping the first off
 * bytes, into the pointer cp.  Return the number of bytes skipped and
 * copied.  Does not verify the length of the array.
 */
static int
cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}
#endif /* CRYPTO_MAY_HAVE_VMPAGE */

/*
 * Given a starting page in an m_epg, determine the length of the
 * current physically contiguous segment.
 */
static __inline size_t
m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
{
	size_t len;
	u_int i;

	len = pglen;
	for (i = idx + 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
			break;
		len += m_epg_pagelen(m, i, 0);
	}
	return (len);
}

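/*
 * Return a pointer to the data of an unmapped (M_EXTPG) mbuf at the given
 * offset and store in *len the length of the physically contiguous run
 * (header, page run, or trailer) starting at that point.
 */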
static void *
m_epg_segment(struct mbuf *m, size_t offset, size_t *len)
{
	u_int i, pglen, pgoff;

	offset += mtod(m, vm_offset_t);
	if (offset < m->m_epg_hdrlen) {
		*len = m->m_epg_hdrlen - offset;
		return (m->m_epg_hdr + offset);
	}
	offset -= m->m_epg_hdrlen;
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (offset < pglen) {
			*len = m_epg_pages_extent(m, i, pglen) - offset;
			return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
			    offset));
		}
		offset -= pglen;
		pgoff = 0;
	}
	KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
	    __func__));
	*len = m->m_epg_trllen - offset;
	return (m->m_epg_trail + offset);
}

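/*
 * As above, but return NULL unless a range of len bytes starting at skip
 * is physically contiguous.
 */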
static __inline void *
m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	void *base;
	size_t seglen;

	base = m_epg_segment(m, skip, &seglen);
	if (len > seglen)
		return (NULL);
	return (base);
}

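/*
 * Initialize a cursor at the start of a crypto buffer.
 */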
void
crypto_cursor_init(struct crypto_buffer_cursor *cc,
    const struct crypto_buffer *cb)
{
	memset(cc, 0, sizeof(*cc));
	cc->cc_type = cb->cb_type;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		cc->cc_buf = cb->cb_buf;
		cc->cc_buf_len = cb->cb_buf_len;
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		cc->cc_mbuf = cb->cb_mbuf;
		break;
	case CRYPTO_BUF_VMPAGE:
		cc->cc_vmpage = cb->cb_vm_page;
		cc->cc_buf_len = cb->cb_vm_page_len;
		cc->cc_offset = cb->cb_vm_page_offset;
		break;
	case CRYPTO_BUF_UIO:
		cc->cc_iov = cb->cb_uio->uio_iov;
		cc->cc_buf_len = cb->cb_uio->uio_resid;
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
#endif
		break;
	}
}

SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t");

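/*
 * Advance the cursor's position by amount bytes.  The caller is
 * responsible for not advancing past the end of the buffer.
 */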
void
crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
{
	size_t remain;

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= amount);
		cc->cc_buf += amount;
		cc->cc_buf_len -= amount;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			amount -= remain;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
		cc->cc_offset += amount;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
			    cc, amount);
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			if (amount < remain) {
				cc->cc_buf_len -= amount;
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (amount == 0 || cc->cc_buf_len == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

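/*
 * Return a pointer to the contiguous segment at the cursor's current
 * position and store its length in *len.  Returns NULL and sets *len to 0
 * once the end of the buffer has been reached.
 */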
void *
crypto_cursor_segment(struct crypto_buffer_cursor *cc, size_t *len)
{
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
	case CRYPTO_BUF_UIO:
	case CRYPTO_BUF_VMPAGE:
		if (cc->cc_buf_len == 0) {
			*len = 0;
			return (NULL);
		}
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf == NULL) {
			*len = 0;
			return (NULL);
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		*len = 0;
		return (NULL);
	}

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		*len = cc->cc_buf_len;
		return (cc->cc_buf);
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf->m_flags & M_EXTPG)
			return (m_epg_segment(cc->cc_mbuf, cc->cc_offset, len));
		*len = cc->cc_mbuf->m_len - cc->cc_offset;
		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
	case CRYPTO_BUF_VMPAGE:
		*len = PAGE_SIZE - cc->cc_offset;
		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
		    *cc->cc_vmpage)) + cc->cc_offset);
	case CRYPTO_BUF_UIO:
		*len = cc->cc_iov->iov_len - cc->cc_offset;
		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
	default:
		__assert_unreachable();
	}
}

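/*
 * Copy size bytes from vsrc into the buffer at the cursor's current
 * position, advancing the cursor past the copied bytes.
 */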
void
crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
    const void *vsrc)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	src = vsrc;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(cc->cc_buf, src, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copyback() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
			src += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

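/*
 * Copy size bytes from the buffer at the cursor's current position into
 * vdst, advancing the cursor past the copied bytes.
 */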
void
crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	dst = vdst;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(dst, cc->cc_buf, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copydata() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
			dst += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			src = (const char *)cc->cc_iov->iov_base +
			    cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

/*
 * To avoid advancing 'cursor', make a local copy that gets advanced
 * instead.
 */
void
crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
    void *vdst)
{
	struct crypto_buffer_cursor copy;

	copy = *cc;
	crypto_cursor_copydata(&copy, size, vdst);
}

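/*
 * Example (an illustrative sketch only; nothing in this file depends on
 * it): a driver can walk a request's payload one contiguous segment at a
 * time using the cursor API above.  The crp_payload_start and
 * crp_payload_length request fields are assumed here purely for
 * illustration.
 *
 *	struct crypto_buffer_cursor cc;
 *	size_t resid, seglen;
 *	char *seg;
 *
 *	crypto_cursor_init(&cc, &crp->crp_buf);
 *	crypto_cursor_advance(&cc, crp->crp_payload_start);
 *	for (resid = crp->crp_payload_length; resid > 0; resid -= seglen) {
 *		seg = crypto_cursor_segment(&cc, &seglen);
 *		seglen = MIN(seglen, resid);
 *		(process seglen bytes starting at seg)
 *		crypto_cursor_advance(&cc, seglen);
 *	}
 */
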
/*
 * Apply function f to the data in an iovec list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cuio_apply(struct uio *uio, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;
	int rval;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		iol--;
		iov++;
	}
	return (0);
}

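/*
 * Copy size bytes from src into the request's output buffer (or its input
 * buffer if no separate output buffer is in use), starting off bytes in.
 */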
void
crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
{
	struct crypto_buffer *cb;

	if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
		cb = &crp->crp_obuf;
	else
		cb = &crp->crp_buf;
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copyback(cb->cb_mbuf, off, size, src);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= cb->cb_vm_page_len);
		MPASS(size + off <=
		    cb->cb_vm_page_len + cb->cb_vm_page_offset);
		cvm_page_copyback(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, size, src);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copyback(cb->cb_uio, off, size, src);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= cb->cb_buf_len);
		bcopy(src, cb->cb_buf + off, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		break;
	}
}

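/*
 * Copy size bytes from the request's input buffer into dst, starting off
 * bytes in.
 */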
void
crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
{

	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= crp->crp_buf.cb_vm_page_len);
		MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
		    crp->crp_buf.cb_vm_page_offset);
		cvm_page_copydata(crp->crp_buf.cb_vm_page,
		    off + crp->crp_buf.cb_vm_page_offset, size, dst);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= crp->crp_buf.cb_buf_len);
		bcopy(crp->crp_buf.cb_buf + off, dst, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", crp->crp_buf.cb_type);
#endif
		break;
	}
}

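/*
 * Apply f to len bytes of the buffer starting at off, invoking it once for
 * each contiguous region.  Stop and return the first non-zero value
 * returned by f, or zero on success.
 */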
int
crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int error;

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		error = m_apply(cb->cb_mbuf, off, len,
		    (int (*)(void *, void *, u_int))f, arg);
		break;
	case CRYPTO_BUF_UIO:
		error = cuio_apply(cb->cb_uio, off, len, f, arg);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		error = cvm_page_apply(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, len, f, arg);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(off + len <= cb->cb_buf_len);
		error = (*f)(arg, cb->cb_buf + off, len);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crypto buf type %d", cb->cb_type);
#endif
		error = 0;
		break;
	}
	return (error);
}

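/*
 * As above, but apply f to the request's input buffer.
 */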
int
crypto_apply(struct cryptop *crp, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
}

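/*
 * Return a pointer to a contiguous region of len bytes starting skip bytes
 * into the mbuf chain, or NULL if the region is not contiguous.
 */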
static inline void *
m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	int rel_off;

	MPASS(skip <= INT_MAX);

	m = m_getptr(m, (int)skip, &rel_off);
	if (m == NULL)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > m->m_len)
		return (NULL);

	if (m->m_flags & M_EXTPG)
		return (m_epg_contiguous_subsegment(m, skip, len));
	return (mtod(m, char *) + skip);
}

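/*
 * As above, but for a region of an iovec list.
 */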
static inline void *
cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
{
	int rel_off, idx;

	MPASS(skip <= INT_MAX);
	idx = cuio_getptr(uio, (int)skip, &rel_off);
	if (idx < 0)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > uio->uio_iov[idx].iov_len)
		return (NULL);
	return ((char *)uio->uio_iov[idx].iov_base + skip);
}

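/*
 * Return a pointer to a contiguous region of len bytes starting skip bytes
 * into the crypto buffer, or NULL if the region is not contiguous.
 */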
void *
crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
    size_t len)
{

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
	case CRYPTO_BUF_UIO:
		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(skip + len <= cb->cb_vm_page_len);
		return (cvm_page_contiguous_segment(cb->cb_vm_page,
		    skip + cb->cb_vm_page_offset, len));
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(skip + len <= cb->cb_buf_len);
		return (cb->cb_buf + skip);
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		return (NULL);
	}
}

void *
crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
{
	return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));
}