/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/amd64/cpu_switch.S 271999 2014-09-22 20:34:36Z jhb $
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "assym.s"
#include "opt_sched.h"

/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

	.text
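
/*
 * LK expands to a lock prefix on SMP kernels, making the bit
 * operations on pm_active and pm_save below atomic; on UP kernels it
 * expands to nothing.
 */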
#ifdef SMP
#define LK	lock ;
#else
#define LK
#endif
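
/*
 * SETLK is the store that releases the old thread.  With ULE on SMP
 * it must be a serializing xchgq, so that all of the old thread's
 * saved state is visible before another CPU (spinning on blocked_lock
 * in sw1 below) picks the thread up; otherwise a plain movq suffices.
 */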
#if defined(SCHED_ULE) && defined(SMP)
#define	SETLK	xchgq
#else
#define	SETLK	movq
#endif

/*
 * cpu_throw()
 *
 * This is the second half of cpu_switch().  It is used when the current
 * thread is either a dummy or slated to die, and we no longer care
 * about its state.  This is only a slight optimization and is probably
 * not worth it anymore.  Note that we still need the old thread, if it
 * exists, in order to clear its bit in the old pmap's pm_active.
 * %rdi = oldtd
 * %rsi = newtd
 */
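/*
 * C-level shape of this entry point (a sketch; the declaration lives
 * in <sys/proc.h>):
 *
 *	void cpu_throw(struct thread *oldtd, struct thread *newtd)
 *	    __dead2;
 *
 * It never returns to its caller: control resumes in newtd at the
 * %rip saved in newtd's pcb, via the tail of cpu_switch() below.
 */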
ENTRY(cpu_throw)
	movl	PCPU(CPUID),%eax
	testq	%rdi,%rdi
	jz	1f
	/* release bit from old pm_active */
	movq	PCPU(CURPMAP),%rdx
	LK btrl	%eax,PM_ACTIVE(%rdx)		/* clear old */
1:
	movq	TD_PCB(%rsi),%r8		/* newtd->td_pcb */
	movq	PCB_CR3(%r8),%rcx		/* new address space */
	jmp	swact
END(cpu_throw)

/*
 * cpu_switch(old, new, mtx)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * %rdi = oldtd
 * %rsi = newtd
 * %rdx = mtx
 */
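/*
 * C-level shape (a sketch; declared in <sys/proc.h>):
 *
 *	void cpu_switch(struct thread *oldtd, struct thread *newtd,
 *	    struct mtx *mtx);
 *
 * mtx is stored into oldtd->td_lock once oldtd's state is saved;
 * that store is what publishes the old thread for other CPUs.
 */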
ENTRY(cpu_switch)
	/* Switch to new thread.  First, save context. */
	movq	TD_PCB(%rdi),%r8
	orl	$PCB_FULL_IRET,PCB_FLAGS(%r8)

	movq	(%rsp),%rax			/* Hardware registers */
	movq	%r15,PCB_R15(%r8)
	movq	%r14,PCB_R14(%r8)
	movq	%r13,PCB_R13(%r8)
	movq	%r12,PCB_R12(%r8)
	movq	%rbp,PCB_RBP(%r8)
	movq	%rsp,PCB_RSP(%r8)
	movq	%rbx,PCB_RBX(%r8)
	movq	%rax,PCB_RIP(%r8)

	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	store_dr			/* static predict not taken */
done_store_dr:

	/* have we used fp, and need a save? */
	cmpq	%rdi,PCPU(FPCURTHREAD)
	jne	3f
	movq	PCB_SAVEFPU(%r8),%r8
	clts
	cmpl	$0,use_xsave
	jne	1f
	fxsave	(%r8)
	jmp	2f
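	/*
	 * xsave takes its feature mask in %edx:%eax, and %rdx still
	 * holds the mtx argument, so preserve it in %rcx across the
	 * save.
	 */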
1211:	movq	%rdx,%rcx
122	movl	xsave_mask,%eax
123	movl	xsave_mask+4,%edx
124	.globl	ctx_switch_xsave
125ctx_switch_xsave:
126	/* This is patched to xsaveopt if supported, see fpuinit_bsp1() */
127	xsave	(%r8)
128	movq	%rcx,%rdx
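	/*
	 * Both save paths join at 2: and set CR0.TS, so that the next
	 * FPU instruction traps (#NM) and the FPU context is reloaded
	 * on demand.
	 */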
2:	smsw	%ax
	orb	$CR0_TS,%al
	lmsw	%ax
	xorl	%eax,%eax
	movq	%rax,PCPU(FPCURTHREAD)
3:

	/* Save is done.  Now fire up new thread.  Leave old vmspace. */
	movq	TD_PCB(%rsi),%r8

	/* switch address space */
	movq	PCB_CR3(%r8),%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax			/* Same address space? */
	jne	swinact
	SETLK	%rdx, TD_LOCK(%rdi)		/* Release the old thread */
	jmp	sw1
swinact:
	movl	PCPU(CPUID),%eax
	/* Release bit from old pmap->pm_active */
	movq	PCPU(CURPMAP),%r12
	LK btrl	%eax,PM_ACTIVE(%r12)		/* clear old */
	SETLK	%rdx,TD_LOCK(%rdi)		/* Release the old thread */
swact:
	/* Set bit in new pmap->pm_active */
	movq	TD_PROC(%rsi),%rdx		/* newproc */
	movq	P_VMSPACE(%rdx), %rdx
	addq	$VM_PMAP,%rdx
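	/*
	 * If the new pmap has a PCID and this CPU still holds valid
	 * TLB entries for it (our bit in pm_save was already set),
	 * set bit 63 of the new %cr3 value (CR3_PCID_SAVE) so that
	 * loading it below does not flush those entries.
	 */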
	cmpl	$-1,PM_PCID(%rdx)
	je	1f
	LK btsl	%eax,PM_SAVE(%rdx)
	jnc	1f
	btsq	$63,%rcx			/* CR3_PCID_SAVE */
	incq	PCPU(PM_SAVE_CNT)
1:
	movq	%rcx,%cr3			/* new address space */
	LK btsl	%eax,PM_ACTIVE(%rdx)		/* set new */
	movq	%rdx,PCPU(CURPMAP)

	/*
	 * We might lose the race: another CPU may have changed the
	 * pmap after we set our bit in pmap->pm_save.  Recheck, and
	 * reload %cr3 with the CR3_PCID_SAVE bit cleared if the pmap
	 * was modified, forcing a TLB flush for this PCID.
	 */
	btrq	$63,%rcx
	jnc	1f
	LK btsl	%eax,PM_SAVE(%rdx)
	jc	1f
	decq	PCPU(PM_SAVE_CNT)
	movq	%rcx,%cr3
1:

sw1:
#if defined(SCHED_ULE) && defined(SMP)
	/* Wait for the new thread to become unblocked */
	movq	$blocked_lock, %rdx
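	/*
	 * The releasing CPU publishes the real lock pointer with
	 * SETLK; spin until it replaces blocked_lock.  pause leaves
	 * EFLAGS intact, so it is safe between the cmpq and the je.
	 */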
1:
	movq	TD_LOCK(%rsi),%rcx
	cmpq	%rcx, %rdx
	pause
	je	1b
#endif
	/*
	 * At this point, we've switched address spaces and are ready
	 * to load up the rest of the next context.
	 */

	/* Skip loading user fsbase/gsbase for kthreads */
	testl	$TDP_KTHREAD,TD_PFLAGS(%rsi)
	jnz	do_kthread

	/*
	 * Load ldt register
	 */
	movq	TD_PROC(%rsi),%rcx
	cmpq	$0, P_MD+MD_LDT(%rcx)
	jne	do_ldt
	xorl	%eax,%eax
ld_ldt:	lldt	%ax
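
	/*
	 * The 32-bit user %fs and %gs bases live in GDT descriptors,
	 * which scatter the base address: bits 15:0 at offset 2,
	 * bits 23:16 in byte 4 and bits 31:24 in byte 7.
	 */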
	/* Restore fs base in GDT */
	movl	PCB_FSBASE(%r8),%eax
	movq	PCPU(FS32P),%rdx
	movw	%ax,2(%rdx)
	shrl	$16,%eax
	movb	%al,4(%rdx)
	shrl	$8,%eax
	movb	%al,7(%rdx)

	/* Restore gs base in GDT */
	movl	PCB_GSBASE(%r8),%eax
	movq	PCPU(GS32P),%rdx
	movw	%ax,2(%rdx)
	shrl	$16,%eax
	movb	%al,4(%rdx)
	shrl	$8,%eax
	movb	%al,7(%rdx)

do_kthread:
	/* Do we need to reload tss ? */
	movq	PCPU(TSSP),%rax
	movq	PCB_TSSP(%r8),%rdx
	testq	%rdx,%rdx
	cmovzq	PCPU(COMMONTSSP),%rdx
	cmpq	%rax,%rdx
	jne	do_tss
done_tss:
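	/*
	 * The pcb sits at the top of the thread's kernel stack, so
	 * %r8 also serves as the ring-0 stack pointer loaded on the
	 * next interrupt or exception.
	 */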
	movq	%r8,PCPU(RSP0)
	movq	%r8,PCPU(CURPCB)
	/* Update the TSS_RSP0 pointer for the next interrupt */
	movq	%r8,COMMON_TSS_RSP0(%rdx)
	movq	%rsi,PCPU(CURTHREAD)		/* into next thread */

	/* Test if debug registers should be restored. */
	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	load_dr				/* static predict not taken */
done_load_dr:

	/* Restore context. */
	movq	PCB_R15(%r8),%r15
	movq	PCB_R14(%r8),%r14
	movq	PCB_R13(%r8),%r13
	movq	PCB_R12(%r8),%r12
	movq	PCB_RBP(%r8),%rbp
	movq	PCB_RSP(%r8),%rsp
	movq	PCB_RBX(%r8),%rbx
	movq	PCB_RIP(%r8),%rax
	movq	%rax,(%rsp)
	ret

	/*
	 * We order these strangely for several reasons.
	 * 1: I wanted to use static branch prediction hints
	 * 2: Most athlon64/opteron cpus don't have them.  They define
	 *    a forward branch as 'predict not taken'.  Intel cores have
	 *    the 'rep' prefix to invert this.
	 * So, to make it work on both forms of cpu we do the detour.
	 * We use jumps rather than call in order to avoid the stack.
	 */

store_dr:
	movq	%dr7,%rax			/* yes, do the save */
	movq	%dr0,%r15
	movq	%dr1,%r14
	movq	%dr2,%r13
	movq	%dr3,%r12
	movq	%dr6,%r11
	movq	%r15,PCB_DR0(%r8)
	movq	%r14,PCB_DR1(%r8)
	movq	%r13,PCB_DR2(%r8)
	movq	%r12,PCB_DR3(%r8)
	movq	%r11,PCB_DR6(%r8)
	movq	%rax,PCB_DR7(%r8)
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	jmp	done_store_dr

load_dr:
	movq	%dr7,%rax
	movq	PCB_DR0(%r8),%r15
	movq	PCB_DR1(%r8),%r14
	movq	PCB_DR2(%r8),%r13
	movq	PCB_DR3(%r8),%r12
	movq	PCB_DR6(%r8),%r11
	movq	PCB_DR7(%r8),%rcx
	movq	%r15,%dr0
	movq	%r14,%dr1
	/* Preserve reserved bits in %dr7 */
	andq	$0x0000fc00,%rax
	andq	$~0x0000fc00,%rcx
	movq	%r13,%dr2
	movq	%r12,%dr3
	orq	%rcx,%rax
	movq	%r11,%dr6
	movq	%rax,%dr7
	jmp	done_load_dr
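
/*
 * do_tss patches the TSS descriptor in the GDT with the new TSS base
 * and rewrites its type byte as 0x89 (present, DPL 0, available
 * 64-bit TSS): ltr faults if the descriptor is still marked busy.
 */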
do_tss:	movq	%rdx,PCPU(TSSP)
	movq	%rdx,%rcx
	movq	PCPU(TSS),%rax
	movw	%cx,2(%rax)
	shrq	$16,%rcx
	movb	%cl,4(%rax)
	shrq	$8,%rcx
	movb	%cl,7(%rax)
	shrq	$8,%rcx
	movl	%ecx,8(%rax)
	movb	$0x89,5(%rax)	/* unset busy */
	movl	$TSSSEL,%eax
	ltr	%ax
	jmp	done_tss

do_ldt:	movq	PCPU(LDT),%rax
	movq	P_MD+MD_LDT_SD(%rcx),%rdx
	movq	%rdx,(%rax)
	movq	P_MD+MD_LDT_SD+8(%rcx),%rdx
	movq	%rdx,8(%rax)
	movl	$LDTSEL,%eax
	jmp	ld_ldt
END(cpu_switch)

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
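/*
 * savectx() and resumectx() pair up like setjmp() and longjmp():
 * savectx() returns 1 when called directly, while a later
 * resumectx(pcb) resumes execution at the saved return address with
 * 0 in %eax.  The suspend/resume path relies on this pairing.
 */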
ENTRY(savectx)
	/* Save caller's return address. */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rdi)

	movq	%rbx,PCB_RBX(%rdi)
	movq	%rsp,PCB_RSP(%rdi)
	movq	%rbp,PCB_RBP(%rdi)
	movq	%r12,PCB_R12(%rdi)
	movq	%r13,PCB_R13(%rdi)
	movq	%r14,PCB_R14(%rdi)
	movq	%r15,PCB_R15(%rdi)

	movq	%cr0,%rax
	movq	%rax,PCB_CR0(%rdi)
	movq	%cr2,%rax
	movq	%rax,PCB_CR2(%rdi)
	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rdi)
	movq	%cr4,%rax
	movq	%rax,PCB_CR4(%rdi)

	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdi)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdi)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdi)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdi)
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdi)
	movq	%dr7,%rax
	movq	%rax,PCB_DR7(%rdi)

	movl	$MSR_FSBASE,%ecx
	rdmsr
	movl	%eax,PCB_FSBASE(%rdi)
	movl	%edx,PCB_FSBASE+4(%rdi)
	movl	$MSR_GSBASE,%ecx
	rdmsr
	movl	%eax,PCB_GSBASE(%rdi)
	movl	%edx,PCB_GSBASE+4(%rdi)
	movl	$MSR_KGSBASE,%ecx
	rdmsr
	movl	%eax,PCB_KGSBASE(%rdi)
	movl	%edx,PCB_KGSBASE+4(%rdi)
	movl	$MSR_EFER,%ecx
	rdmsr
	movl	%eax,PCB_EFER(%rdi)
	movl	%edx,PCB_EFER+4(%rdi)
	movl	$MSR_STAR,%ecx
	rdmsr
	movl	%eax,PCB_STAR(%rdi)
	movl	%edx,PCB_STAR+4(%rdi)
	movl	$MSR_LSTAR,%ecx
	rdmsr
	movl	%eax,PCB_LSTAR(%rdi)
	movl	%edx,PCB_LSTAR+4(%rdi)
	movl	$MSR_CSTAR,%ecx
	rdmsr
	movl	%eax,PCB_CSTAR(%rdi)
	movl	%edx,PCB_CSTAR+4(%rdi)
	movl	$MSR_SF_MASK,%ecx
	rdmsr
	movl	%eax,PCB_SFMASK(%rdi)
	movl	%edx,PCB_SFMASK+4(%rdi)

	sgdt	PCB_GDT(%rdi)
	sidt	PCB_IDT(%rdi)
	sldt	PCB_LDT(%rdi)
	str	PCB_TR(%rdi)

	movl	$1,%eax
	ret
END(savectx)

/*
 * resumectx(pcb)
 * Resume processor state from pcb.
 */
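/*
 * This is the other half of the savectx() pair above: after the full
 * state reload it returns 0 at the %rip that savectx() recorded.
 */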
ENTRY(resumectx)
	/* Switch to KPML4phys. */
	movq	KPML4phys,%rax
	movq	%rax,%cr3

	/* Force kernel segment registers. */
	movl	$KDSEL,%eax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss
	movl	$KUF32SEL,%eax
	movw	%ax,%fs
	movl	$KUG32SEL,%eax
	movw	%ax,%gs

	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%rdi),%eax
	movl	4 + PCB_FSBASE(%rdi),%edx
	wrmsr
	movl	$MSR_GSBASE,%ecx
	movl	PCB_GSBASE(%rdi),%eax
	movl	4 + PCB_GSBASE(%rdi),%edx
	wrmsr
	movl	$MSR_KGSBASE,%ecx
	movl	PCB_KGSBASE(%rdi),%eax
	movl	4 + PCB_KGSBASE(%rdi),%edx
	wrmsr

	/* Restore EFER. */
	movl	$MSR_EFER,%ecx
	movl	PCB_EFER(%rdi),%eax
	wrmsr

	/* Restore fast syscall stuff. */
	movl	$MSR_STAR,%ecx
	movl	PCB_STAR(%rdi),%eax
	movl	4 + PCB_STAR(%rdi),%edx
	wrmsr
	movl	$MSR_LSTAR,%ecx
	movl	PCB_LSTAR(%rdi),%eax
	movl	4 + PCB_LSTAR(%rdi),%edx
	wrmsr
	movl	$MSR_CSTAR,%ecx
	movl	PCB_CSTAR(%rdi),%eax
	movl	4 + PCB_CSTAR(%rdi),%edx
	wrmsr
	movl	$MSR_SF_MASK,%ecx
	movl	PCB_SFMASK(%rdi),%eax
	wrmsr

	/* Restore CR0, CR2, CR4 and CR3. */
	movq	PCB_CR0(%rdi),%rax
	movq	%rax,%cr0
	movq	PCB_CR2(%rdi),%rax
	movq	%rax,%cr2
	movq	PCB_CR4(%rdi),%rax
	movq	%rax,%cr4
	movq	PCB_CR3(%rdi),%rax
	movq	%rax,%cr3

	/* Restore descriptor tables. */
	lidt	PCB_IDT(%rdi)
	lldt	PCB_LDT(%rdi)

#define	SDT_SYSTSS	9
#define	SDT_SYSBSY	11

	/* Clear "task busy" bit and reload TR. */
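	/*
	 * The TSS type field occupies bits 3:0 of descriptor byte 5;
	 * busy (SDT_SYSBSY, 11) differs from available (SDT_SYSTSS, 9)
	 * only in bit 1, so the andb below clears just that bit.
	 */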
	movq	PCPU(TSS),%rax
	andb	$(~SDT_SYSBSY | SDT_SYSTSS),5(%rax)
	movw	PCB_TR(%rdi),%ax
	ltr	%ax

#undef	SDT_SYSTSS
#undef	SDT_SYSBSY

	/* Restore debug registers. */
	movq	PCB_DR0(%rdi),%rax
	movq	%rax,%dr0
	movq	PCB_DR1(%rdi),%rax
	movq	%rax,%dr1
	movq	PCB_DR2(%rdi),%rax
	movq	%rax,%dr2
	movq	PCB_DR3(%rdi),%rax
	movq	%rax,%dr3
	movq	PCB_DR6(%rdi),%rax
	movq	%rax,%dr6
	movq	PCB_DR7(%rdi),%rax
	movq	%rax,%dr7

	/* Restore other callee saved registers. */
	movq	PCB_R15(%rdi),%r15
	movq	PCB_R14(%rdi),%r14
	movq	PCB_R13(%rdi),%r13
	movq	PCB_R12(%rdi),%r12
	movq	PCB_RBP(%rdi),%rbp
	movq	PCB_RSP(%rdi),%rsp
	movq	PCB_RBX(%rdi),%rbx

	/* Restore return address. */
	movq	PCB_RIP(%rdi),%rax
	movq	%rax,(%rsp)

	xorl	%eax,%eax
	ret
END(resumectx)