/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>

#include <machine/bus.h>

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

static int uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);

int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(udaddr, kaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyout(kaddr, udaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

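/*
 * Upper bound on the number of physical pages a transfer of len bytes can
 * touch: a copy that does not start on a page boundary can span one page
 * more than howmany(len, PAGE_SIZE).
 */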
#define	PHYS_PAGE_COUNT(len)	(howmany(len, PAGE_SIZE) + 1)

int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = src;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
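	/*
	 * Gather the vm_page_t for every physical page the destination may
	 * touch.  dst is advanced one page per iteration, which leaves
	 * dst & PAGE_MASK (the byte offset within the first page) unchanged
	 * for the uiomove_fromphys() call below.
	 */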
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(dst);
	return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}

int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
	vm_page_t m[PHYS_PAGE_COUNT(len)];
	struct iovec iov[1];
	struct uio uio;
	int i;

	iov[0].iov_base = dst;
	iov[0].iov_len = len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
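	/*
	 * As in physcopyin(), collect the pages to copy from; src & PAGE_MASK
	 * is invariant under the per-page increments.
	 */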
	for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
		m[i] = PHYS_TO_VM_PAGE(src);
	return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}

#undef PHYS_PAGE_COUNT

int
physcopyin_vlist(bus_dma_segment_t *src, off_t offset, vm_paddr_t dst,
    size_t len)
{
	size_t seg_len;
	int error;

	error = 0;
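	/* Skip over leading segments that offset consumes entirely. */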
	while (offset >= src->ds_len) {
		offset -= src->ds_len;
		src++;
	}

	while (len > 0 && error == 0) {
		seg_len = MIN(src->ds_len - offset, len);
		error = physcopyin((void *)(uintptr_t)(src->ds_addr + offset),
		    dst, seg_len);
		offset = 0;
		src++;
		len -= seg_len;
		dst += seg_len;
	}

	return (error);
}

int
physcopyout_vlist(vm_paddr_t src, bus_dma_segment_t *dst, off_t offset,
    size_t len)
{
	size_t seg_len;
	int error;

	error = 0;
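	/* Skip over leading segments that offset consumes entirely. */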
	while (offset >= dst->ds_len) {
		offset -= dst->ds_len;
		dst++;
	}

	while (len > 0 && error == 0) {
		seg_len = MIN(dst->ds_len - offset, len);
		error = physcopyout(src, (void *)(uintptr_t)(dst->ds_addr +
		    offset), seg_len);
		offset = 0;
		dst++;
		len -= seg_len;
		src += seg_len;
	}

	return (error);
}

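/*
 * Transfer n bytes between the kernel buffer cp and the region described by
 * uio, in the direction given by uio->uio_rw, advancing the uio's iovecs,
 * offset and resid as data moves.
 */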
int
uiomove(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 0));
}

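/*
 * A version of uiomove() that never sleeps to service a page fault: any
 * access that would require handling a non-spurious fault fails with EFAULT
 * instead.  Only valid for UIO_USERSPACE uios; intended for callers that
 * hold locks across the copy.
 */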
int
uiomove_nofault(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 1));
}

static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
	struct iovec *iov;
	size_t cnt;
	int error, newflags, save;

	save = error = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));
	KASSERT(uio->uio_resid >= 0,
	    ("%s: uio %p resid underflow", __func__, uio));

	if (uio->uio_segflg == UIO_USERSPACE) {
		newflags = TDP_DEADLKTREAT;
		if (nofault) {
			/*
			 * Fail if a non-spurious page fault occurs.
			 */
			newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
		} else {
			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
			    "Calling uiomove()");
		}
		save = curthread_pflags_set(newflags);
	} else {
		KASSERT(nofault == 0, ("uiomove: nofault"));
	}

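	/*
	 * Consume the uio one iovec at a time: skip exhausted iovecs, clamp
	 * each copy to both the iovec length and the remaining count n, and
	 * advance the iovec, resid and offset by the amount moved.
	 */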
	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0,
		    ("%s: uio %p iovcnt underflow", __func__, uio));

		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			maybe_yield();
			switch (uio->uio_rw) {
			case UIO_READ:
				error = copyout(cp, iov->iov_base, cnt);
				break;
			case UIO_WRITE:
				error = copyin(iov->iov_base, cp, cnt);
				break;
			}
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			switch (uio->uio_rw) {
			case UIO_READ:
				bcopy(cp, iov->iov_base, cnt);
				break;
			case UIO_WRITE:
				bcopy(iov->iov_base, cp, cnt);
				break;
			}
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	if (save)
		curthread_pflags_restore(save);
	return (error);
}

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	size_t offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > IOSIZE_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}
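
/*
 * A minimal usage sketch (hypothetical, not part of this file): a cdev
 * d_read_t handler exposing a fixed kernel buffer, with uiomove_frombuf()
 * doing all of the offset and length clamping:
 *
 *	static char report[128];
 *
 *	static int
 *	report_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *
 *		return (uiomove_frombuf(report, sizeof(report), uio));
 *	}
 */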

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling ureadc()");

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {
	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		iov_base = iov->iov_base;
		*iov_base = c;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

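/*
 * Copy an iovec array in from user space into a malloc(9)ed kernel buffer.
 * The caller supplies in "error" the errno to return when iovcnt exceeds
 * UIO_MAXIOV, since callers differ on which value is appropriate.  On
 * success the array must eventually be released with free(*iov, M_IOV).
 */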
int
copyiniov(const struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
	u_int iovlen;

	*iov = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof(struct iovec);
	*iov = malloc(iovlen, M_IOV, M_WAITOK);
	error = copyin(iovp, *iov, iovlen);
	if (error) {
		free(*iov, M_IOV);
		*iov = NULL;
	}
	return (error);
}

int
copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof(struct iovec);
	uio = allocuio(iovcnt);
	iov = uio->uio_iov;
	error = copyin(iovp, iov, iovlen);
	if (error != 0) {
		freeuio(uio);
		return (error);
	}
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > IOSIZE_MAX - uio->uio_resid) {
			freeuio(uio);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}

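/*
 * Typical copyinuio() call pattern (sketch only, cf. sys_readv()): convert
 * the user's iovec array into a kernel struct uio, hand it to the kern_*()
 * routine, then release it:
 *
 *	struct uio *auio;
 *	int error;
 *
 *	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
 *	if (error != 0)
 *		return (error);
 *	error = kern_readv(td, uap->fd, auio);
 *	freeuio(auio);
 *	return (error);
 */

/*
 * Allocate a struct uio with room for iovcnt iovecs in a single
 * allocation; the iovec array is placed immediately after the uio itself.
 * The result is released with freeuio().
 */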
struct uio *
allocuio(u_int iovcnt)
{
	struct uio *uio;
	int iovlen;

	KASSERT(iovcnt <= UIO_MAXIOV,
	    ("Requested %u iovecs exceed UIO_MAXIOV", iovcnt));
	iovlen = iovcnt * sizeof(struct iovec);
	uio = malloc(iovlen + sizeof(*uio), M_IOV, M_WAITOK);
	uio->uio_iov = (struct iovec *)(uio + 1);

	return (uio);
}

void
freeuio(struct uio *uio)
{
	free(uio, M_IOV);
}

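/*
 * Duplicate uiop, including its iovec array, so that the copy can be
 * consumed independently of the original.  The clone is released with
 * freeuio().
 */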
struct uio *
cloneuio(struct uio *uiop)
{
	struct iovec *iov;
	struct uio *uio;
	int iovlen;

	iovlen = uiop->uio_iovcnt * sizeof(struct iovec);
	uio = allocuio(uiop->uio_iovcnt);
	iov = uio->uio_iov;
	*uio = *uiop;
	uio->uio_iov = iov;
	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
	return (uio);
}

/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
	struct vmspace *vms;
	int error;
	vm_size_t size;

	vms = td->td_proc->p_vmspace;

	/*
	 * Map somewhere after heap in process memory.
	 */
	*addr = round_page((vm_offset_t)vms->vm_daddr +
	    lim_max(td, RLIMIT_DATA));

	/* round size up to page boundary */
	size = (vm_size_t)round_page(sz);
	if (size == 0)
		return (EINVAL);
	error = vm_mmap_object(&vms->vm_map, addr, size, VM_PROT_READ |
	    VM_PROT_WRITE, VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, NULL, 0,
	    FALSE, td);
	return (error);
}

/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
	vm_map_t map;
	vm_size_t size;

	if (sz == 0)
		return (0);

	map = &td->td_proc->p_vmspace->vm_map;
	size = (vm_size_t)round_page(sz);

	if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
		return (EINVAL);

	return (0);
}

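/*
 * The fuword*() and casuword*() wrappers below return -1 both on failure
 * and when the user word actually contains -1; callers that need to tell
 * the two apart should use fueword*() and casueword*() directly, which
 * report errors out of band.
 */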
int32_t
fuword32(volatile const void *addr)
{
	int rv;
	int32_t val;

	rv = fueword32(addr, &val);
	return (rv == -1 ? -1 : val);
}

#ifdef _LP64
int64_t
fuword64(volatile const void *addr)
{
	int rv;
	int64_t val;

	rv = fueword64(addr, &val);
	return (rv == -1 ? -1 : val);
}
#endif /* _LP64 */

long
fuword(volatile const void *addr)
{
	long val;
	int rv;

	rv = fueword(addr, &val);
	return (rv == -1 ? -1 : val);
}

uint32_t
casuword32(volatile uint32_t *addr, uint32_t old, uint32_t new)
{
	int rv;
	uint32_t val;

	rv = casueword32(addr, old, &val, new);
	return (rv == -1 ? -1 : val);
}

u_long
casuword(volatile u_long *addr, u_long old, u_long new)
{
	int rv;
	u_long val;

	rv = casueword(addr, old, &val, new);
	return (rv == -1 ? -1 : val);
}
