/* support.S, revision 274648 */
1/*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 *    may be used to endorse or promote products derived from this software
16 *    without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: stable/10/sys/amd64/amd64/support.S 274648 2014-11-18 12:53:32Z kib $
31 */
32
33#include "opt_ddb.h"
34
35#include <machine/asmacros.h>
36#include <machine/intr_machdep.h>
37#include <machine/pmap.h>
38
39#include "assym.s"
40
41	.text
42
43/*
44 * bcopy family
45 * void bzero(void *buf, u_int len)
46 */
47
48/* done */
/*
 * void bzero(void *buf, size_t len)
 *	%rdi = buf, %rsi = len
 *
 * Zero the buffer by 8-byte words first, then the remaining 0-7 bytes.
 */
ENTRY(bzero)
	movq	%rsi,%rcx		/* %rcx = byte count */
	xorl	%eax,%eax		/* value to store: 0 */
	shrq	$3,%rcx			/* number of whole 8-byte words */
	cld				/* store forwards */
	rep
	stosq				/* zero 8 bytes at a time */
	movq	%rsi,%rcx
	andq	$7,%rcx			/* remaining 0-7 tail bytes */
	rep
	stosb
	ret
END(bzero)
62
/* Address: %rdi */
/*
 * pagezero(void *page)
 *
 * Zero one page using non-temporal stores so the zeroed page does not
 * displace useful data from the caches.
 */
ENTRY(pagezero)
	movq	$-PAGE_SIZE,%rdx	/* %rdx counts up from -PAGE_SIZE to 0 */
	subq	%rdx,%rdi		/* %rdi = one past the end of the page */
	xorl	%eax,%eax		/* store zeroes */
1:
	movnti	%rax,(%rdi,%rdx)	/* non-temporal: bypass the caches */
	movnti	%rax,8(%rdi,%rdx)
	movnti	%rax,16(%rdi,%rdx)
	movnti	%rax,24(%rdi,%rdx)
	addq	$32,%rdx		/* 32 bytes per iteration */
	jne	1b
	sfence				/* make the NT stores globally visible */
	ret
END(pagezero)
78
/*
 * bcmp(b1, b2, len)
 *	%rdi, %rsi, %rdx
 *
 * Returns 0 if the two buffers are identical, non-zero otherwise.
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* compare whole 8-byte words first */
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f			/* word mismatch found */

	movq	%rdx,%rcx
	andq	$7,%rcx			/* then the remaining 0-7 tail bytes */
	repe
	cmpsb
1:
	setne	%al			/* ZF clear (mismatch) -> %al = 1 */
	movsbl	%al,%eax		/* widen to the int return value */
	ret
END(bcmp)
96
97/*
98 * bcopy(src, dst, cnt)
99 *       rdi, rsi, rdx
100 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
101 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi		/* movs wants %rsi = src, %rdi = dst */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi				/* start at the last byte */
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std					/* DF set: movs decrements */
	rep
	movsb
	movq	%rdx,%rcx			/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi				/* step to the low byte of each word */
	subq	$7,%rdi
	rep
	movsq
	cld					/* restore DF clear for the caller */
	ret
END(bcopy)
140
141/*
142 * Note: memcpy does not support overlapping copies
143 */
/*
 * memcpy(dst, src, len)
 *	  %rdi, %rsi, %rdx
 *
 * Forward-only copy; %rdi/%rsi already match movs' dst/src roles so no
 * register swap is needed.  Overlapping buffers are not supported.
 */
ENTRY(memcpy)
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret
END(memcpy)
156
157/*
158 * pagecopy(%rdi=from, %rsi=to)
159 */
/*
 * pagecopy(%rdi = from, %rsi = to)
 *
 * Copy one page: prefetch the whole source with prefetchnta, then copy
 * with non-temporal stores to keep the destination out of the caches.
 */
ENTRY(pagecopy)
	movq	$-PAGE_SIZE,%rax	/* %rax: prefetch index, -PAGE_SIZE..0 */
	movq	%rax,%rdx		/* %rdx: copy index, -PAGE_SIZE..0 */
	subq	%rax,%rdi		/* point src/dst at their page ends */
	subq	%rax,%rsi
1:
	prefetchnta (%rdi,%rax)		/* pull in the source */
	addq	$64,%rax		/* one cache line per step */
	jne	1b
2:
	movq	(%rdi,%rdx),%rax
	movnti	%rax,(%rsi,%rdx)	/* non-temporal store */
	movq	8(%rdi,%rdx),%rax
	movnti	%rax,8(%rsi,%rdx)
	movq	16(%rdi,%rdx),%rax
	movnti	%rax,16(%rsi,%rdx)
	movq	24(%rdi,%rdx),%rax
	movnti	%rax,24(%rsi,%rdx)
	addq	$32,%rdx		/* 32 bytes per iteration */
	jne	2b
	sfence				/* make the NT stores globally visible */
	ret
END(pagecopy)
183
184/* fillw(pat, base, cnt) */
185/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax		/* %ax = 16-bit fill pattern */
	movq	%rsi,%rdi		/* %rdi = destination for stosw */
	movq	%rdx,%rcx		/* %rcx = count of 16-bit words */
	cld				/* fill forwards */
	rep
	stosw
	ret
END(fillw)
195
196/*****************************************************************************/
197/* copyout and fubyte family                                                 */
198/*****************************************************************************/
199/*
200 * Access user memory from inside the kernel. These routines should be
201 * the only places that do this.
202 *
203 * These routines set curpcb->pcb_onfault for the time they execute. When a
204 * protection violation occurs inside the functions, the trap handler
205 * returns to *curpcb->pcb_onfault instead of the function.
206 */
207
208/*
209 * copyout(from_kernel, to_user, len)  - MP SAFE
210 *         %rdi,        %rsi,    %rdx
211 */
ENTRY(copyout)
	movq	PCPU(CURPCB),%rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi		/* %rsi = kernel src, %rdi = user dst */
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx				/* copy by 8-byte words first */
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl				/* then the 0-7 tail bytes */
	rep
	movsb

done_copyout:
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyout_fault:
	/* Entered via pcb_onfault from the trap handler on a bad access. */
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax			/* return EFAULT */
	ret
END(copyout)
267
268/*
269 * copyin(from_user, to_kernel, len) - MP SAFE
270 *        %rdi,      %rsi,      %rdx
271 */
ENTRY(copyin)
	movq	PCPU(CURPCB),%rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid: no wrap, and the whole source range
	 * lies below VM_MAXUSER_ADDRESS
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi		/* %rsi = user src, %rdi = kernel dst */
	movq	%rdx,%rcx
	movb	%cl,%al			/* save the low count byte in %al */
	shrq	$3,%rcx				/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyin_fault:
	/* Entered via pcb_onfault from the trap handler on a bad access. */
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax			/* return EFAULT */
	ret
END(copyin)
313
314/*
315 * casueword32.  Compare and set user integer.  Returns -1 on fault,
316 *        0 if access was successful.  Old value is written to *oldp.
317 *        dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
318 */
ENTRY(casueword32)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* last valid start for a 4-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgl %ecx,(%rdi)			/* new = %ecx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 * value.
	 */
	movl	%eax,%esi
	xorl	%eax,%eax			/* return 0: access succeeded */
	movq	%rax,PCB_ONFAULT(%r8)		/* disarm fault recovery */

	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch corrupted pointer.
	 */
	movl	%esi,(%rdx)			/* oldp = %rdx */
	ret
END(casueword32)
350
351/*
352 * casueword.  Compare and set user long.  Returns -1 on fault,
353 *        0 if access was successful.  Old value is written to *oldp.
354 *        dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
355 */
ENTRY(casueword)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)	/* arm fault recovery */

	/*
	 * cmpxchgq accesses 8 bytes, so the last valid start address is
	 * VM_MAXUSER_ADDRESS-8, matching fueword64/suword64.  The previous
	 * -4 bound let the locked access straddle the end of the user
	 * address space.
	 */
	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgq %rcx,(%rdi)			/* new = %rcx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	%rax,%rsi
	xorl	%eax,%eax			/* return 0: access succeeded */
	movq	%rax,PCB_ONFAULT(%r8)		/* disarm fault recovery */
	/* store to oldp after disarming, so a bad oldp pointer panics */
	movq	%rsi,(%rdx)
	ret
END(casueword)
381
382/*
383 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
384 * byte from user memory.
385 * addr = %rdi, valp = %rsi
386 */
387
ALTENTRY(fueword64)
ENTRY(fueword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	xorl	%eax,%eax			/* return 0 on success */
	movq	(%rdi),%r11			/* fetch the user word */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm before storing to *valp */
	movq	%r11,(%rsi)			/* *valp = value */
	ret
/* END names must match the entry symbols (fueword64/fueword, not fuword*) */
END(fueword64)
END(fueword)
404
ENTRY(fueword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	xorl	%eax,%eax			/* return 0 on success */
	movl	(%rdi),%r11d			/* fetch the user word */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm before storing to *valp */
	movl	%r11d,(%rsi)			/* *valp = value */
	ret
END(fueword32)
419
420/*
421 * fuswintr() and suswintr() are specialized variants of fuword16() and
422 * suword16(), respectively.  They are called from the profiling code,
423 * potentially at interrupt time.  If they fail, that's okay; good things
424 * will happen later.  They always fail for now, until the trap code is
425 * able to deal with this.
426 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax	/* unconditionally fail; see comment above */
	ret
END(suswintr)
END(fuswintr)
433
ENTRY(fuword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access */
	cmpq	%rax,%rdi
	ja	fusufault

	movzwl	(%rdi),%eax		/* zero-extended value is the return */
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm fault recovery */
	ret
END(fuword16)
446
ENTRY(fubyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access */
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax		/* zero-extended value is the return */
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm fault recovery */
	ret
END(fubyte)
459
	ALIGN_TEXT
/*
 * Common fault landing pad for the fu*, su* and casueword* functions.
 * Entered from the trap handler via pcb_onfault; disarms the handler
 * and returns -1.
 */
fusufault:
	movq	PCPU(CURPCB),%rcx
	xorl	%eax,%eax
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm fault recovery */
	decq	%rax			/* return -1 */
	ret
467
468/*
469 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
470 * user memory.  All these functions are MPSAFE.
471 * addr = %rdi, value = %rsi
472 */
ALTENTRY(suword64)
ENTRY(suword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)			/* store the user word */
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(suword64)
END(suword)
489
ENTRY(suword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)			/* store the user word */
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(suword32)
504
ENTRY(suword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)			/* store the user word */
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(suword16)
519
ENTRY(subyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)			/* store the user byte */
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
END(subyte)
535
536/*
537 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
538 *           %rdi, %rsi, %rdx, %rcx
539 *
540 *	copy a string from from to to, stop when a 0 character is reached.
541 *	return ENAMETOOLONG if string is longer than maxlen, and
542 *	EFAULT on protection violations. If lencopied is non-zero,
543 *	return the actual length in *lencopied.
544 */
ENTRY(copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi		/* %rsi = from (user), %rdi = to */
	movq	PCPU(CURPCB),%rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx			/* pre-bias: loop decrements first */
	cld
2:
	decq	%rdx
	jz	3f			/* ran out of room before the NUL */

	lodsb				/* copy byte by byte, */
	stosb				/* checking each for NUL */
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax		/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax
	cmpq	%rax,%rsi		/* stopped at the end of user space? */
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax		/* faulted, or bad 'from' address */

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm fault recovery */

	testq	%r9,%r9			/* lencopied pointer may be NULL */
	jz	1f
	subq	%rdx,%r8		/* copied = maxlen - remaining */
	movq	%r8,(%r9)
1:
	ret
END(copyinstr)
604
605/*
606 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
607 *         %rdi, %rsi, %rdx, %rcx
608 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi		/* %rsi = from, %rdi = to */
	incq	%rdx			/* pre-bias: loop decrements first */
	cld
1:
	decq	%rdx
	jz	4f			/* ran out of room before the NUL */
	lodsb				/* copy byte by byte, */
	stosb				/* checking each for NUL */
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax		/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx		/* lencopied pointer may be NULL */
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8		/* copied = maxlen - remaining */
	movq	%r8,(%rcx)
7:
	ret
END(copystr)
641
642/*
643 * Handling of special amd64 registers and descriptor tables etc
644 * %rdi
645 */
646/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload every data segment register with the kernel selector */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq			/* far return also reloads %cs with KCSEL */
END(lgdt)
669
670/*****************************************************************************/
671/* setjump, longjump                                                         */
672/*****************************************************************************/
673
/*
 * Kernel setjmp: save the callee-saved registers, the stack pointer and
 * the return address into the buffer at %rdi.  Returns 0 to the caller.
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)
687
/*
 * Kernel longjmp: restore the context saved by setjmp from the buffer
 * at %rdi, then return 1 at the saved return address.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
702
703/*
704 * Support for reading MSRs in the safe manner.
705 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* catch #GP from a bad MSR */
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx. Returns
				   hi 32 bits in %edx, lo in %eax */
	salq	$32,%rdx	/* shift the high half into place */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax
	movq	%rax,(%rsi)	/* *data = value */
	xorq	%rax,%rax	/* return 0 on success */
	movq	%rax,PCB_ONFAULT(%r8)	/* disarm fault recovery */
	ret
END(rdmsr_safe)
720
721/*
722 * Support for writing MSRs in the safe manner.
723 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* catch #GP from a bad MSR */
	movl	%edi,%ecx
	movl	%esi,%eax	/* low 32 bits of data */
	sarq	$32,%rsi
	movl	%esi,%edx	/* high 32 bits of data */
	wrmsr			/* Write MSR pointed by %ecx. Accepts
				   hi 32 bits in %edx, lo in %eax. */
	xorq	%rax,%rax	/* return 0 on success */
	movq	%rax,PCB_ONFAULT(%r8)	/* disarm fault recovery */
	ret
END(wrmsr_safe)
737
738/*
739 * MSR operations fault handler
740 */
	ALIGN_TEXT
/*
 * Entered via pcb_onfault when rdmsr/wrmsr raises #GP; %r8 still holds
 * curpcb from the caller.  Disarms the handler and returns EFAULT.
 */
msr_onfault:
	movq	$0,PCB_ONFAULT(%r8)	/* disarm fault recovery */
	movl	$EFAULT,%eax		/* return EFAULT */
	ret
746