vm_glue.c revision 338484
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/vm/vm_glue.c 338484 2018-09-05 21:28:33Z kib $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
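
/*
 * Example usage (sketch; "buf" and "len" are placeholders, not names
 * from this file): verify that a kernel buffer's map entries permit
 * writing before handing it to code that cannot tolerate a fault:
 *
 *	if (!kernacc(buf, len, VM_PROT_WRITE))
 *		return (EFAULT);
 */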

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
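
/*
 * Example usage (sketch; "uaddr", "kbuf" and "len" are placeholders):
 * probe a user range, then perform the actual access with copyin(),
 * which does the authoritative fault handling as the warning above
 * requires:
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 */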

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}
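
/*
 * Example usage (sketch; names are placeholders): wire a user buffer
 * across an operation that must not fault, then unwire it.  This is
 * the pattern used by the sysctl code, the only present consumer of
 * vslock():
 *
 *	error = vslock(uaddr, len);
 *	if (error != 0)
 *		return (error);
 *	... access the wired range ...
 *	vsunlock(uaddr, len);
 */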

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	/* Grab the page unbusied; busy it only if it must be paged in. */
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_activate(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}
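
/*
 * Example usage (sketch; "off", "dst" and "n" are placeholders): image
 * activators such as imgact_elf.c pair vm_imgact_map_page() with
 * vm_imgact_unmap_page() below to copy a range out of an executable's
 * backing object:
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((char *)sf_buf_kva(sf) + off, dst, n);
 *	vm_imgact_unmap_page(sf);
 */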

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");
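
/*
 * Cached stacks are kept on a singly-linked list.  No separate
 * allocation is needed for the list entries: each entry lives at the
 * base of the cached stack's own KVA, which stays mapped while the
 * stack sits on the list (see vm_thread_dispose()).  The vm_lowmem
 * eventhandler below releases the whole cache under memory pressure.
 */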

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of fork(2) for a
 * process and of thread creation.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == kstack_pages) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = kstack_pages;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED, ma, pages);
	for (i = 0; i < pages; i++)
		ma[i]->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}
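
/*
 * Resulting KVA layout of a kstack allocation.  The guard pages sit at
 * the low end and are left unmapped, so a stack overflow (stacks grow
 * down) faults instead of silently corrupting adjacent memory:
 *
 *	allocation base            td_kstack                 stack top
 *	|<-- KSTACK_GUARD_PAGES -->|<--------- pages -------->|
 *	        (unmapped)                   (mapped)
 */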

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_stack_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == kstack_pages && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    kstack_pages);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed. It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if interrupt is using kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
	/* Lock-free update of the global maximum. */
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared.  Essentially,
		 * this changes memory that is shared amongst threads
		 * into COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}
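
/*
 * Summary of the flag combinations handled above (see rfork(2)):
 *
 *	RFPROC	RFMEM	effect
 *	  0	  0	no new process; unshare the current vmspace (COW)
 *	  0	  1	no new process; vmspace left shared
 *	  1	  0	new process with a copied vmspace (vm2)
 *	  1	  1	new process sharing the parent's vmspace
 */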

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}
604