/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2023 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <machine/bus.h>

/*
 * memdesc_copyback copies data from a source buffer into a buffer
 * described by a memory descriptor.
 */
static void
phys_copyback(vm_paddr_t pa, int off, int size, const void *src)
{
	const char *cp;
	u_int page_off;
	int todo;
	void *p;

	KASSERT(PMAP_HAS_DMAP, ("direct-map required"));

	cp = src;
	pa += off;
	page_off = pa & PAGE_MASK;
	while (size > 0) {
		todo = min(PAGE_SIZE - page_off, size);
		p = (void *)PHYS_TO_DMAP(pa);
		memcpy(p, cp, todo);
		size -= todo;
		cp += todo;
		pa += todo;
		page_off = 0;
	}
}
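
/*
 * Worked example (hypothetical values, 4KB pages): copying
 * size = 0x1800 bytes to pa = 0x5000 with off = 0xe00 starts at
 * physical address 0x5e00, so the loop above copies 0x200 bytes to
 * finish the first page, then a full 0x1000-byte page, and finally
 * 0x600 bytes into the last page.  Each page is reached through its
 * permanent direct-map (DMAP) address, so no temporary mappings are
 * required.
 */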

static void
vlist_copyback(struct bus_dma_segment *vlist, int sglist_cnt, int off,
    int size, const void *src)
{
	const char *p;
	int todo;

	while (vlist->ds_len <= off) {
		KASSERT(sglist_cnt > 1, ("out of sglist entries"));

		off -= vlist->ds_len;
		vlist++;
		sglist_cnt--;
	}

	p = src;
	while (size > 0) {
		KASSERT(sglist_cnt >= 1, ("out of sglist entries"));

		todo = size;
		if (todo > vlist->ds_len - off)
			todo = vlist->ds_len - off;

		memcpy((char *)(uintptr_t)vlist->ds_addr + off, p, todo);
		off = 0;
		vlist++;
		sglist_cnt--;
		size -= todo;
		p += todo;
	}
}
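
/*
 * Worked example (hypothetical values) of the skip-then-copy pattern
 * shared by all of the scatter/gather helpers here: given segments
 * of 0x100, 0x200, and 0x400 bytes, a copy at off = 0x180 first
 * skips the 0x100-byte segment (leaving off = 0x80), copies at most
 * 0x180 bytes from the second segment, and continues into the third
 * segment with off reset to 0.
 */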

static void
plist_copyback(struct bus_dma_segment *plist, int sglist_cnt, int off,
    int size, const void *src)
{
	const char *p;
	int todo;

	while (plist->ds_len <= off) {
		KASSERT(sglist_cnt > 1, ("out of sglist entries"));

		off -= plist->ds_len;
		plist++;
		sglist_cnt--;
	}

	p = src;
	while (size > 0) {
		KASSERT(sglist_cnt >= 1, ("out of sglist entries"));

		todo = size;
		if (todo > plist->ds_len - off)
			todo = plist->ds_len - off;

		phys_copyback(plist->ds_addr, off, todo, p);
		off = 0;
		plist++;
		sglist_cnt--;
		size -= todo;
		p += todo;
	}
}

static void
vmpages_copyback(vm_page_t *m, int off, int size, const void *src)
{
	struct iovec iov[1];
	struct uio uio;
	int error __diagused;

	iov[0].iov_base = __DECONST(void *, src);
	iov[0].iov_len = size;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	error = uiomove_fromphys(m, off, size, &uio);
	KASSERT(error == 0 && uio.uio_resid == 0, ("copy failed"));
}
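
/*
 * vmpages_copyback() above (and vmpages_copydata() below) wraps the
 * plain kernel buffer in a single UIO_SYSSPACE iovec and lets
 * uiomove_fromphys() handle per-page access to the vm_page array.
 * UIO_WRITE means the uio (the buffer) is the source and the pages
 * are the destination; UIO_READ is the reverse.
 */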

void
memdesc_copyback(struct memdesc *mem, int off, int size, const void *src)
{
	KASSERT(off >= 0, ("%s: invalid offset %d", __func__, off));
	KASSERT(size >= 0, ("%s: invalid size %d", __func__, size));

	switch (mem->md_type) {
	case MEMDESC_VADDR:
		KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
		memcpy((char *)mem->u.md_vaddr + off, src, size);
		break;
	case MEMDESC_PADDR:
		KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
		phys_copyback(mem->u.md_paddr, off, size, src);
		break;
	case MEMDESC_VLIST:
		vlist_copyback(mem->u.md_list, mem->md_nseg, off, size, src);
		break;
	case MEMDESC_PLIST:
		plist_copyback(mem->u.md_list, mem->md_nseg, off, size, src);
		break;
	case MEMDESC_UIO:
		panic("Use uiomove instead");
		break;
	case MEMDESC_MBUF:
		m_copyback(mem->u.md_mbuf, off, size, src);
		break;
	case MEMDESC_VMPAGES:
		KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
		vmpages_copyback(mem->u.md_ma, mem->md_offset + off, size,
		    src);
		break;
	default:
		__assert_unreachable();
	}
}
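
/*
 * Example (hypothetical): writing a 4-byte completion status at
 * byte offset 8 of whatever buffer a request describes, without
 * caring which descriptor type backs it:
 *
 *	uint32_t status = htole32(0);
 *
 *	memdesc_copyback(&mem, 8, sizeof(status), &status);
 */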

/*
 * memdesc_copydata copies data from a buffer described by a memory
 * descriptor into a destination buffer.
 */
static void
phys_copydata(vm_paddr_t pa, int off, int size, void *dst)
{
	char *cp;
	u_int page_off;
	int todo;
	const void *p;

	KASSERT(PMAP_HAS_DMAP, ("direct-map required"));

	cp = dst;
	pa += off;
	page_off = pa & PAGE_MASK;
	while (size > 0) {
		todo = min(PAGE_SIZE - page_off, size);
		p = (const void *)PHYS_TO_DMAP(pa);
		memcpy(cp, p, todo);
		size -= todo;
		cp += todo;
		pa += todo;
		page_off = 0;
	}
}

static void
vlist_copydata(struct bus_dma_segment *vlist, int sglist_cnt, int off,
    int size, void *dst)
{
	char *p;
	int todo;

	while (vlist->ds_len <= off) {
		KASSERT(sglist_cnt > 1, ("out of sglist entries"));

		off -= vlist->ds_len;
		vlist++;
		sglist_cnt--;
	}

	p = dst;
	while (size > 0) {
		KASSERT(sglist_cnt >= 1, ("out of sglist entries"));

		todo = size;
		if (todo > vlist->ds_len - off)
			todo = vlist->ds_len - off;

		memcpy(p, (char *)(uintptr_t)vlist->ds_addr + off, todo);
		off = 0;
		vlist++;
		sglist_cnt--;
		size -= todo;
		p += todo;
	}
}

static void
plist_copydata(struct bus_dma_segment *plist, int sglist_cnt, int off,
    int size, void *dst)
{
	char *p;
	int todo;

	while (plist->ds_len <= off) {
		KASSERT(sglist_cnt > 1, ("out of sglist entries"));

		off -= plist->ds_len;
		plist++;
		sglist_cnt--;
	}

	p = dst;
	while (size > 0) {
		KASSERT(sglist_cnt >= 1, ("out of sglist entries"));

		todo = size;
		if (todo > plist->ds_len - off)
			todo = plist->ds_len - off;

		phys_copydata(plist->ds_addr, off, todo, p);
		off = 0;
		plist++;
		sglist_cnt--;
		size -= todo;
		p += todo;
	}
}

static void
vmpages_copydata(vm_page_t *m, int off, int size, void *dst)
{
	struct iovec iov[1];
	struct uio uio;
	int error __diagused;

	iov[0].iov_base = dst;
	iov[0].iov_len = size;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	error = uiomove_fromphys(m, off, size, &uio);
	KASSERT(error == 0 && uio.uio_resid == 0, ("copy failed"));
}

void
memdesc_copydata(struct memdesc *mem, int off, int size, void *dst)
{
	KASSERT(off >= 0, ("%s: invalid offset %d", __func__, off));
	KASSERT(size >= 0, ("%s: invalid size %d", __func__, size));

	switch (mem->md_type) {
	case MEMDESC_VADDR:
		KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
		memcpy(dst, (const char *)mem->u.md_vaddr + off, size);
		break;
	case MEMDESC_PADDR:
		KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
		phys_copydata(mem->u.md_paddr, off, size, dst);
		break;
	case MEMDESC_VLIST:
		vlist_copydata(mem->u.md_list, mem->md_nseg, off, size, dst);
		break;
	case MEMDESC_PLIST:
		plist_copydata(mem->u.md_list, mem->md_nseg, off, size, dst);
		break;
	case MEMDESC_UIO:
		panic("Use uiomove instead");
		break;
	case MEMDESC_MBUF:
		m_copydata(mem->u.md_mbuf, off, size, dst);
		break;
	case MEMDESC_VMPAGES:
		KASSERT(off + size <= mem->md_len, ("copy out of bounds"));
		vmpages_copydata(mem->u.md_ma, mem->md_offset + off, size,
		    dst);
		break;
	default:
		__assert_unreachable();
	}
}
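
/*
 * Example (hypothetical): the mirror of the copyback example above,
 * reading the same 4-byte field back out of the buffer:
 *
 *	uint32_t status;
 *
 *	memdesc_copydata(&mem, 8, sizeof(status), &status);
 *	status = le32toh(status);
 */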

/*
 * memdesc_alloc_ext_mbufs allocates a chain of external mbufs backed
 * by the storage of a memory descriptor's data buffer.
 */
static struct mbuf *
vaddr_ext_mbuf(memdesc_alloc_ext_mbuf_t *ext_alloc, void *cb_arg, int how,
    void *buf, size_t len, size_t *actual_len)
{
	*actual_len = len;
	return (ext_alloc(cb_arg, how, buf, len));
}

static bool
can_append_paddr(struct mbuf *m, vm_paddr_t pa)
{
	u_int last_len;

	/* Can always append to an empty mbuf. */
	if (m->m_epg_npgs == 0)
		return (true);

	/* Can't append to a full mbuf. */
	if (m->m_epg_npgs == MBUF_PEXT_MAX_PGS)
		return (false);

	/* Can't append a non-page-aligned address to a non-empty mbuf. */
	if ((pa & PAGE_MASK) != 0)
		return (false);

	/* Can't append if the last page is not a full page. */
	last_len = m->m_epg_last_len;
	if (m->m_epg_npgs == 1)
		last_len += m->m_epg_1st_off;
	return (last_len == PAGE_SIZE);
}
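
/*
 * For example (hypothetical values, 4KB pages): a range ending
 * exactly at 0x3000 can be extended by one starting at 0x7000
 * (page aligned, last page full) but not by one starting at 0x7080,
 * and a range ending mid-page at 0x2c00 cannot be extended at all.
 * This is because M_EXTPG mbufs can only describe one leading
 * partial page and one trailing partial page.
 */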

/*
 * Returns amount of data added to an M_EXTPG mbuf.
 */
static size_t
append_paddr_range(struct mbuf *m, vm_paddr_t pa, size_t len)
{
	size_t appended;

	appended = 0;

	/* Append the first page. */
	if (m->m_epg_npgs == 0) {
		m->m_epg_pa[0] = trunc_page(pa);
		m->m_epg_npgs = 1;
		m->m_epg_1st_off = pa & PAGE_MASK;
		m->m_epg_last_len = PAGE_SIZE - m->m_epg_1st_off;
		if (m->m_epg_last_len > len)
			m->m_epg_last_len = len;
		m->m_len = m->m_epg_last_len;
		len -= m->m_epg_last_len;
		pa += m->m_epg_last_len;
		appended += m->m_epg_last_len;
	}
	KASSERT(len == 0 || (pa & PAGE_MASK) == 0,
	    ("PA not aligned before full pages"));

	/* Full pages. */
	while (len >= PAGE_SIZE && m->m_epg_npgs < MBUF_PEXT_MAX_PGS) {
		m->m_epg_pa[m->m_epg_npgs] = pa;
		m->m_epg_npgs++;
		m->m_epg_last_len = PAGE_SIZE;
		m->m_len += PAGE_SIZE;
		pa += PAGE_SIZE;
		len -= PAGE_SIZE;
		appended += PAGE_SIZE;
	}

	/* Final partial page. */
	if (len > 0 && m->m_epg_npgs < MBUF_PEXT_MAX_PGS) {
		KASSERT(len < PAGE_SIZE, ("final page is full page"));
		m->m_epg_pa[m->m_epg_npgs] = pa;
		m->m_epg_npgs++;
		m->m_epg_last_len = len;
		m->m_len += len;
		appended += len;
	}

	return (appended);
}
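
/*
 * Worked example (hypothetical values, 4KB pages): appending
 * pa = 0x1e00, len = 0x1400 to an empty mbuf records page 0x1000
 * with m_epg_1st_off = 0xe00 (0x200 bytes), all of page 0x2000, and
 * page 0x3000 with m_epg_last_len = 0x200, giving m_len = 0x1400.
 * The return value lets callers advance pa and allocate another
 * mbuf when MBUF_PEXT_MAX_PGS is reached.
 */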

static struct mbuf *
paddr_ext_mbuf(memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
    vm_paddr_t pa, size_t len, size_t *actual_len, bool can_truncate)
{
	struct mbuf *m, *tail;
	size_t appended;

	if (can_truncate) {
		vm_paddr_t end;

		/*
		 * Trim any partial page at the end, but not if it's
		 * the only page.
		 */
		end = trunc_page(pa + len);
		if (end > pa)
			len = end - pa;
	}
	*actual_len = len;

	m = tail = extpg_alloc(cb_arg, how);
	if (m == NULL)
		return (NULL);
	while (len > 0) {
		if (!can_append_paddr(tail, pa)) {
			MBUF_EXT_PGS_ASSERT_SANITY(tail);
			tail->m_next = extpg_alloc(cb_arg, how);
			if (tail->m_next == NULL)
				goto error;
			tail = tail->m_next;
		}

		appended = append_paddr_range(tail, pa, len);
		KASSERT(appended > 0, ("did not append anything"));
		KASSERT(appended <= len, ("appended too much"));

		pa += appended;
		len -= appended;
	}

	MBUF_EXT_PGS_ASSERT_SANITY(tail);
	return (m);
error:
	m_freem(m);
	return (NULL);
}
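
/*
 * Truncation example (hypothetical values, 4KB pages): for
 * pa = 0x1200 and len = 0x2a00 with can_truncate true, the range is
 * trimmed to end at the page boundary 0x3000, so the chain carries
 * only 0x1e00 bytes and *actual_len reports that to the caller.  A
 * range that lies entirely within one page is never trimmed to
 * nothing.
 */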

static struct mbuf *
vlist_ext_mbuf(memdesc_alloc_ext_mbuf_t *ext_alloc, void *cb_arg, int how,
    struct bus_dma_segment *vlist, u_int sglist_cnt, size_t offset,
    size_t len, size_t *actual_len)
{
	struct mbuf *m, *n, *tail;
	size_t todo;

	*actual_len = len;

	while (vlist->ds_len <= offset) {
		KASSERT(sglist_cnt > 1, ("out of sglist entries"));

		offset -= vlist->ds_len;
		vlist++;
		sglist_cnt--;
	}

	m = tail = NULL;
	while (len > 0) {
		KASSERT(sglist_cnt >= 1, ("out of sglist entries"));

		todo = len;
		if (todo > vlist->ds_len - offset)
			todo = vlist->ds_len - offset;

		n = ext_alloc(cb_arg, how, (char *)(uintptr_t)vlist->ds_addr +
		    offset, todo);
		if (n == NULL)
			goto error;

		if (m == NULL) {
			m = n;
			tail = m;
		} else {
			tail->m_next = n;
			tail = n;
		}

		offset = 0;
		vlist++;
		sglist_cnt--;
		len -= todo;
	}

	return (m);
error:
	m_freem(m);
	return (NULL);
}

static struct mbuf *
plist_ext_mbuf(memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
    struct bus_dma_segment *plist, u_int sglist_cnt, size_t offset, size_t len,
    size_t *actual_len, bool can_truncate)
{
	vm_paddr_t pa;
	struct mbuf *m, *tail;
	size_t appended, totlen, todo;

	while (plist->ds_len <= offset) {
		KASSERT(sglist_cnt > 1, ("out of sglist entries"));

		offset -= plist->ds_len;
		plist++;
		sglist_cnt--;
	}

	totlen = 0;
	m = tail = extpg_alloc(cb_arg, how);
	if (m == NULL)
		return (NULL);
	while (len > 0) {
		KASSERT(sglist_cnt >= 1, ("out of sglist entries"));

		pa = plist->ds_addr + offset;
		todo = len;
		if (todo > plist->ds_len - offset)
			todo = plist->ds_len - offset;

		/*
		 * If truncation is enabled, avoid sending a final
		 * partial page, but only if there is more data
		 * available in the current segment.  Also, at least
		 * some data must be sent, so only drop the final page
		 * for this segment if the segment spans multiple
		 * pages or some other data is already queued.
		 */
		else if (can_truncate) {
			vm_paddr_t end;

			end = trunc_page(pa + len);
			if (end <= pa) {
				/*
				 * This last segment is only a partial
				 * page.  Drop it if other data is
				 * already queued; otherwise send the
				 * partial page as-is, since at least
				 * some data must be sent.
				 */
				if (totlen != 0) {
					len = 0;
					break;
				}
			} else
				todo = end - pa;
		}

		offset = 0;
		len -= todo;
		totlen += todo;

		while (todo > 0) {
			if (!can_append_paddr(tail, pa)) {
				MBUF_EXT_PGS_ASSERT_SANITY(tail);
				tail->m_next = extpg_alloc(cb_arg, how);
				if (tail->m_next == NULL)
					goto error;
				tail = tail->m_next;
			}

			appended = append_paddr_range(tail, pa, todo);
			KASSERT(appended > 0, ("did not append anything"));

			pa += appended;
			todo -= appended;
		}
	}

	MBUF_EXT_PGS_ASSERT_SANITY(tail);
	*actual_len = totlen;
	return (m);
error:
	m_freem(m);
	return (NULL);
}

static struct mbuf *
vmpages_ext_mbuf(memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
    vm_page_t *ma, size_t offset, size_t len, size_t *actual_len,
    bool can_truncate)
{
	struct mbuf *m, *tail;

	while (offset >= PAGE_SIZE) {
		ma++;
		offset -= PAGE_SIZE;
	}

	if (can_truncate) {
		size_t end;

		/*
		 * Trim any partial page at the end, but not if it's
		 * the only page.
		 */
		end = trunc_page(offset + len);
		if (end > offset)
			len = end - offset;
	}
	*actual_len = len;

	m = tail = extpg_alloc(cb_arg, how);
	if (m == NULL)
		return (NULL);

	/* First page. */
	m->m_epg_pa[0] = VM_PAGE_TO_PHYS(*ma);
	ma++;
	m->m_epg_npgs = 1;
	m->m_epg_1st_off = offset;
	m->m_epg_last_len = PAGE_SIZE - offset;
	if (m->m_epg_last_len > len)
		m->m_epg_last_len = len;
	m->m_len = m->m_epg_last_len;
	len -= m->m_epg_last_len;

	/* Full pages. */
	while (len >= PAGE_SIZE) {
		if (tail->m_epg_npgs == MBUF_PEXT_MAX_PGS) {
			MBUF_EXT_PGS_ASSERT_SANITY(tail);
			tail->m_next = extpg_alloc(cb_arg, how);
			if (tail->m_next == NULL)
				goto error;
			tail = tail->m_next;
		}

		tail->m_epg_pa[tail->m_epg_npgs] = VM_PAGE_TO_PHYS(*ma);
		ma++;
		tail->m_epg_npgs++;
		tail->m_epg_last_len = PAGE_SIZE;
		tail->m_len += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	/* Last partial page. */
	if (len > 0) {
		if (tail->m_epg_npgs == MBUF_PEXT_MAX_PGS) {
			MBUF_EXT_PGS_ASSERT_SANITY(tail);
			tail->m_next = extpg_alloc(cb_arg, how);
			if (tail->m_next == NULL)
				goto error;
			tail = tail->m_next;
		}

		tail->m_epg_pa[tail->m_epg_npgs] = VM_PAGE_TO_PHYS(*ma);
		ma++;
		tail->m_epg_npgs++;
		tail->m_epg_last_len = len;
		tail->m_len += len;
	}

	MBUF_EXT_PGS_ASSERT_SANITY(tail);
	return (m);
error:
	m_freem(m);
	return (NULL);
}

/*
 * Somewhat similar to m_copym but optionally avoids a partial mbuf at
 * the end.
 */
static struct mbuf *
mbuf_subchain(struct mbuf *m0, size_t offset, size_t len,
    size_t *actual_len, bool can_truncate, int how)
{
	struct mbuf *m, *tail;
	size_t totlen;

	while (offset >= m0->m_len) {
		offset -= m0->m_len;
		m0 = m0->m_next;
	}

	/* Always return at least one mbuf. */
	totlen = m0->m_len - offset;
	if (totlen > len)
		totlen = len;

	m = m_get(how, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_len = totlen;
	if (m0->m_flags & (M_EXT | M_EXTPG)) {
		m->m_data = m0->m_data + offset;
		mb_dupcl(m, m0);
	} else
		memcpy(mtod(m, void *), mtodo(m0, offset), m->m_len);

	tail = m;
	m0 = m0->m_next;
	len -= totlen;
	while (len > 0) {
		/*
		 * If truncation is enabled, don't send any partial
		 * mbufs besides the first one.
		 */
		if (can_truncate && m0->m_len > len)
			break;

		tail->m_next = m_get(how, MT_DATA);
		if (tail->m_next == NULL)
			goto error;
		tail = tail->m_next;
		/*
		 * Clamp the copy to the remaining request so that
		 * "len" cannot underflow when truncation is disabled
		 * and this mbuf extends past the requested length.
		 */
		tail->m_len = m0->m_len;
		if (tail->m_len > len)
			tail->m_len = len;
		if (m0->m_flags & (M_EXT | M_EXTPG)) {
			tail->m_data = m0->m_data;
			mb_dupcl(tail, m0);
		} else
			memcpy(mtod(tail, void *), mtod(m0, void *),
			    tail->m_len);

		totlen += tail->m_len;
		m0 = m0->m_next;
		len -= tail->m_len;
	}
	*actual_len = totlen;
	return (m);
error:
	m_freem(m);
	return (NULL);
}
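
/*
 * Example (hypothetical): for a source chain of 0x100-, 0x200-, and
 * 0x300-byte mbufs, a call with offset = 0x80 and len = 0x400
 * returns a chain whose first mbuf holds the last 0x80 bytes of the
 * first source mbuf.  With can_truncate true the copy then stops
 * after the 0x200-byte mbuf (*actual_len = 0x280) rather than
 * splitting the 0x300-byte one.
 */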

struct mbuf *
memdesc_alloc_ext_mbufs(struct memdesc *mem,
    memdesc_alloc_ext_mbuf_t *ext_alloc,
    memdesc_alloc_extpg_mbuf_t *extpg_alloc, void *cb_arg, int how,
    size_t offset, size_t len, size_t *actual_len, bool can_truncate)
{
	struct mbuf *m;
	size_t done;

	switch (mem->md_type) {
	case MEMDESC_VADDR:
		m = vaddr_ext_mbuf(ext_alloc, cb_arg, how,
		    (char *)mem->u.md_vaddr + offset, len, &done);
		break;
	case MEMDESC_PADDR:
		m = paddr_ext_mbuf(extpg_alloc, cb_arg, how, mem->u.md_paddr +
		    offset, len, &done, can_truncate);
		break;
	case MEMDESC_VLIST:
		m = vlist_ext_mbuf(ext_alloc, cb_arg, how, mem->u.md_list,
		    mem->md_nseg, offset, len, &done);
		break;
	case MEMDESC_PLIST:
		m = plist_ext_mbuf(extpg_alloc, cb_arg, how, mem->u.md_list,
		    mem->md_nseg, offset, len, &done, can_truncate);
		break;
	case MEMDESC_UIO:
		panic("uio not supported");
	case MEMDESC_MBUF:
		m = mbuf_subchain(mem->u.md_mbuf, offset, len, &done,
		    can_truncate, how);
		break;
	case MEMDESC_VMPAGES:
		m = vmpages_ext_mbuf(extpg_alloc, cb_arg, how, mem->u.md_ma,
		    mem->md_offset + offset, len, &done, can_truncate);
		break;
	default:
		__assert_unreachable();
	}
	if (m == NULL)
		return (NULL);

	if (can_truncate) {
		KASSERT(done <= len, ("chain too long"));
	} else {
		KASSERT(done == len, ("short chain with no limit"));
	}
	KASSERT(m_length(m, NULL) == done, ("length mismatch"));
	if (actual_len != NULL)
		*actual_len = done;
	return (m);
}
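
/*
 * Usage sketch (hypothetical): a transport might supply allocator
 * callbacks along these lines, where my_ext_alloc() wraps a
 * borrowed KVA range in an M_EXT mbuf and my_extpg_alloc() returns
 * an unmapped page-backed mbuf.  my_ext_free and my_extpg_free
 * stand in for transport-specific free routines.
 *
 *	static struct mbuf *
 *	my_ext_alloc(void *arg, int how, void *buf, size_t len)
 *	{
 *		struct mbuf *m;
 *
 *		m = m_get(how, MT_DATA);
 *		if (m == NULL)
 *			return (NULL);
 *		m_extadd(m, buf, len, my_ext_free, arg, NULL, 0,
 *		    EXT_MOD_TYPE);
 *		m->m_len = len;
 *		return (m);
 *	}
 *
 *	static struct mbuf *
 *	my_extpg_alloc(void *arg, int how)
 *	{
 *		return (mb_alloc_ext_pgs(how, my_extpg_free, 0));
 *	}
 *
 * A caller would then build a chain for the first "len" bytes of a
 * request with:
 *
 *	m = memdesc_alloc_ext_mbufs(&mem, my_ext_alloc, my_extpg_alloc,
 *	    cb_arg, M_WAITOK, 0, len, &actual, true);
 */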