/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/sf_buf.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>
#include <machine/vfp.h>

/*
 * The sizes of struct switchframe and struct trapframe must both be
 * multiples of 8 for correct stack alignment.
 */
CTASSERT(sizeof(struct switchframe) == 24);
CTASSERT(sizeof(struct trapframe) == 80);

#ifndef NSFBUFS
#define NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void     sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

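/*
 * Hash on the page's index within vm_page_array; sf_buf_hashmask is the
 * power-of-two mask supplied by hashinit().
 */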
#define SF_BUF_HASH(m)  (((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int    sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
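	/* The pcb lives at the top of the new thread's kernel stack. */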
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pcb2->pcb_vfpcpu = -1;
	pcb2->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ;
	pmap_activate(td2);
	td2->td_frame = tf = (struct trapframe *)STACKALIGN(
	    pcb2->un_32.pcb32_sp - sizeof(struct trapframe));
	*tf = *td1->td_frame;
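	/*
	 * Build a switchframe just below the trapframe so that the child's
	 * first cpu_switch() returns through fork_trampoline(), which hands
	 * sf_r4 (fork_return) and sf_r5 (td2) to fork_exit() and then
	 * returns to user mode via the copied trapframe.
	 */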
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
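	/* The child sees fork() return 0 with the carry (error) bit clear. */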
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;
	KASSERT((pcb2->un_32.pcb32_sp & 7) == 0,
	    ("cpu_fork: Incorrect stack alignment"));

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
#ifdef ARM_TP_ADDRESS
	td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS;
#else
	td2->td_md.md_tp = (register_t) get_tls();
#endif
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		pmap_kremove(sf->kva);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
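	/* Reserve KVA for the pool and give each sf_buf its own page slot. */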
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
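	/* Reuse an existing mapping if this page already has an active sf_buf. */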
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
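	/*
	 * No cached mapping; take a buffer from the free list, sleeping if
	 * none are available unless SFB_NOWAIT was passed.
	 */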
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;
	int fixup;
#ifdef __ARMEB__
	u_int call;
#endif

	frame = td->td_frame;
	fixup = 0;

#ifdef __ARMEB__
	/*
	 * __syscall returns an off_t while most other syscalls return an
	 * int.  As an off_t is 64 bits and an int is 32 bits, we need to
	 * place the returned data into r1.  As the lseek and freebsd6_lseek
	 * syscalls also return an off_t, they do not need this fixup.
	 */
#ifdef __ARM_EABI__
	call = frame->tf_r7;
#else
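	/*
	 * Without the EABI the syscall number is encoded in the immediate
	 * field of the swi instruction that trapped.
	 */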
	call = *(u_int32_t *)(frame->tf_pc - INSN_SIZE) & 0x000fffff;
#endif
	if (call == SYS___syscall) {
		register_t *ap = &frame->tf_r0;
		register_t code = ap[_QUAD_LOWWORD];
		if (td->td_proc->p_sysent->sv_mask)
			code &= td->td_proc->p_sysent->sv_mask;
		fixup = (code != SYS_freebsd6_lseek && code != SYS_lseek)
		    ? 1 : 0;
	}
#endif

	switch (error) {
	case 0:
		if (fixup) {
			frame->tf_r0 = 0;
			frame->tf_r1 = td->td_retval[0];
		} else {
			frame->tf_r0 = td->td_retval[0];
			frame->tf_r1 = td->td_retval[1];
		}
		frame->tf_spsr &= ~PSR_C_bit;   /* carry bit */
		break;
	case ERESTART:
		/*
		 * Reconstruct the pc to point at the swi.
		 */
		frame->tf_pc -= INSN_SIZE;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
		frame->tf_r0 = error;
		frame->tf_spsr |= PSR_C_bit;    /* carry bit */
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that come from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct trapframe *tf;
	struct switchframe *sf;

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
	    ("cpu_set_upcall: Incorrect stack alignment"));

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

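	/*
	 * Arrange for the thread to enter entry(arg) in user mode on an
	 * aligned stack carved from the top of the supplied stack region.
	 */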
	tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size
	    - sizeof(struct trapframe));
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	td->td_md.md_tp = (register_t)tls_base;
	if (td == curthread) {
		critical_enter();
#ifdef ARM_TP_ADDRESS
		*(register_t *)ARM_TP_ADDRESS = (register_t)tls_base;
#else
		set_tls((void *)tls_base);
#endif
		critical_exit();
	}
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	/*
	 * Ensure td_frame is aligned to an 8 byte boundary as it will be
	 * placed into the stack pointer which must be 8 byte aligned in
	 * the ARM EABI.
	 */
	td->td_frame = (struct trapframe *)STACKALIGN((u_int)td->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(struct pcb) -
	    sizeof(struct trapframe));
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct switchframe *sf;
	struct trapframe *tf;

	tf = td->td_frame;
	sf = (struct switchframe *)tf - 1;
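	/*
	 * Overwrite the switchframe built by cpu_fork() so that fork_exit()
	 * calls func(arg) instead of fork_return(), keeping the thread in
	 * kernel mode.
	 */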
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
	KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
	    ("cpu_set_fork_handler: Incorrect stack alignment"));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending)
		busdma_swi();
}

void
cpu_exit(struct thread *td)
{
}
486