cpufunc.h revision 212177
1/*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 *    may be used to endorse or promote products derived from this software
16 *    without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: head/sys/amd64/include/cpufunc.h 212177 2010-09-03 14:25:17Z rdivacky $
31 */
32
33/*
34 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
36 * used in preference to this.
37 */
38
39#ifndef _MACHINE_CPUFUNC_H_
40#define	_MACHINE_CPUFUNC_H_
41
42#ifndef _SYS_CDEFS_H_
43#error this file needs sys/cdefs.h as a prerequisite
44#endif
45
46struct region_descriptor;
47
/*
 * Memory-mapped I/O accessors.  Each macro performs exactly one
 * volatile load (readX) or store (writeX) of the indicated width at
 * virtual address va; the volatile qualifier keeps the compiler from
 * caching, combining or eliding the access.
 */
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))
57
58#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
59
/* Trap into the debugger by executing a breakpoint (INT3) instruction. */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}
65
/*
 * Bit-scan wrappers.  bsf{l,q} return the 0-based index of the least
 * significant set bit; bsr{l,q} return the index of the most
 * significant set bit.  Per the BSF/BSR instruction definition the
 * destination is undefined when mask is 0, so callers must test for
 * a zero mask themselves (see ffsl()/fls() below).
 */
static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}
101
/*
 * Flush the cache line containing linear address addr.  The "m"
 * operand gives the compiler a data dependency on the addressed
 * byte, ordering the flush against earlier stores to it.
 */
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

/*
 * Disable maskable interrupts on this CPU ("cli").  The "memory"
 * clobber doubles as a compiler barrier so memory accesses are not
 * reordered out of the interrupt-disabled region.
 */
static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}
114
/*
 * Execute CPUID for leaf ax; the results (%eax, %ebx, %ecx, %edx)
 * are stored into p[0..3].
 */
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

/* Like do_cpuid(), but additionally supplies the sub-leaf cx in %ecx. */
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

/*
 * Re-enable maskable interrupts ("sti").  NOTE(review): unlike
 * disable_intr() this carries no "memory" clobber and so is not a
 * compiler barrier — confirm callers do not depend on one here.
 */
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}
136
#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define	HAVE_INLINE_FFSL

/*
 * Find-first-set for a long: returns the 1-based index of the least
 * significant set bit, or 0 if mask is 0 (the zero check is required
 * because bsfq()'s result is undefined for a zero operand).
 */
static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

/*
 * Find-last-set for an int: returns the 1-based index of the most
 * significant set bit, or 0 if mask is 0.
 */
static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

/* Find-last-set for a long; same convention as fls(). */
static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#endif /* _KERNEL */
167
/* Halt the CPU until the next interrupt arrives ("hlt"). */
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/* Read one byte from I/O port. */
static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Read a 32-bit word from I/O port. */
static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}
191
/*
 * String input: read count elements of the given width from I/O port
 * into the buffer at addr ("cld; rep ins*").  addr and count are
 * read-write operands because the instruction advances %rdi and
 * decrements %rcx; the "memory" clobber tells the compiler the
 * buffer is written.
 */
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}
218
/*
 * Invalidate all caches WITHOUT writing dirty lines back ("invd").
 * Modified data is discarded — use wbinvd() unless that is exactly
 * what is wanted.
 */
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

/* Read a 16-bit word from I/O port. */
static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}
233
/* Write one byte to I/O port. */
static __inline void
outb(u_int port, u_char data)
{
	__asm volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write a 32-bit word to I/O port. */
static __inline void
outl(u_int port, u_int data)
{
	__asm volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

/*
 * String output: write count elements of the given width from the
 * buffer at addr to I/O port ("cld; rep outs*").  addr and count are
 * read-write operands because the instruction advances %rsi and
 * decrements %rcx.
 */
static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* Write a 16-bit word to I/O port. */
static __inline void
outw(u_int port, u_short data)
{
	__asm volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
275
/*
 * Full memory fence: orders all prior loads and stores before all
 * subsequent ones.  The "memory" clobber makes it a compiler barrier
 * as well.
 */
static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

/* Spin-wait hint ("pause"); used in busy-wait loops. */
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

/* Return the current 64-bit %rflags register. */
static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}
297
/*
 * Read the model-specific register msr.  RDMSR returns the value
 * split across %edx:%eax; the two halves are reassembled here.
 */
static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

/* Read performance-monitoring counter pmc (%edx:%eax reassembled). */
static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

/* Read the time-stamp counter (%edx:%eax reassembled). */
static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}
324
/* Write back all dirty cache lines, then invalidate the caches. */
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

/* Load rf into the %rflags register. */
static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

/*
 * Write newval to model-specific register msr.  WRMSR takes the
 * value split across %edx:%eax, so it is split before the call.
 */
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
346
/*
 * Control-register accessors.  load_crN() writes and rcrN() reads
 * the corresponding %crN register.  load_cr3() carries a "memory"
 * clobber because changing the page-table base invalidates cached
 * translations from the compiler's point of view.
 */
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

/* %cr2 holds the faulting linear address after a page fault. */
static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}
402
/*
 * Global TLB flush (except for those for pages marked PG_G):
 * reloading %cr3 with its current value flushes all non-global
 * TLB entries.
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}
423
/* Return the %fs segment selector. */
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the %gs segment selector. */
static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the %ss segment selector. */
static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}
447
/* Load selector sel into %ds. */
static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

/* Load selector sel into %es. */
static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}
459
/*
 * Arm address monitoring on addr for a subsequent cpu_mwait()
 * (MONITOR: %rax = address, %ecx = extensions, %edx = hints).
 */
static __inline void
cpu_monitor(const void *addr, int extensions, int hints)
{
	__asm __volatile("monitor;"
	    : :"a" (addr), "c" (extensions), "d"(hints));
}

/*
 * Wait for a store to the monitored address range
 * (MWAIT: %eax = hints, %ecx = extensions).
 */
static __inline void
cpu_mwait(int extensions, int hints)
{
	__asm __volatile("mwait;" : :"a" (hints), "c" (extensions));
}
472
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
/*
 * Load selector sel into %fs.  On amd64 a mov to %fs clobbers the
 * fs base, so the base MSR is read before and rewritten after the
 * selector load; %eax/%edx are clobbered by rdmsr/wrmsr.
 */
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
/*
 * Load selector sel into %gs, preserving the gs base the same way.
 * Interrupts are additionally disabled across the sequence (pushfq;
 * cli; ...; popfq) — see the comment below.
 */
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif
514
/* Load the IDT register from the region descriptor at addr. */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* Load the LDT register with selector sel. */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* Load the task register with TSS selector sel. */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}
532
/*
 * Hardware debug register accessors: rdrN() reads and load_drN()
 * writes %drN.  %dr0-%dr3 hold breakpoint linear addresses, %dr6 is
 * the debug status register and %dr7 the debug control register.
 */
static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
644
/*
 * Disable interrupts, returning the previous %rflags so the caller
 * can later restore the prior interrupt state with intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

/* Restore %rflags (and thus the interrupt state) saved by intr_disable(). */
static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
660
661#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
662
663int	breakpoint(void);
664u_int	bsfl(u_int mask);
665u_int	bsrl(u_int mask);
666void	disable_intr(void);
667void	do_cpuid(u_int ax, u_int *p);
668void	enable_intr(void);
669void	halt(void);
670void	ia32_pause(void);
671u_char	inb(u_int port);
672u_int	inl(u_int port);
673void	insb(u_int port, void *addr, size_t count);
674void	insl(u_int port, void *addr, size_t count);
675void	insw(u_int port, void *addr, size_t count);
676register_t	intr_disable(void);
677void	intr_restore(register_t rf);
678void	invd(void);
679void	invlpg(u_int addr);
680void	invltlb(void);
681u_short	inw(u_int port);
682void	lidt(struct region_descriptor *addr);
683void	lldt(u_short sel);
684void	load_cr0(u_long cr0);
685void	load_cr3(u_long cr3);
686void	load_cr4(u_long cr4);
687void	load_dr0(u_int64_t dr0);
688void	load_dr1(u_int64_t dr1);
689void	load_dr2(u_int64_t dr2);
690void	load_dr3(u_int64_t dr3);
691void	load_dr4(u_int64_t dr4);
692void	load_dr5(u_int64_t dr5);
693void	load_dr6(u_int64_t dr6);
694void	load_dr7(u_int64_t dr7);
695void	load_fs(u_short sel);
696void	load_gs(u_short sel);
697void	ltr(u_short sel);
698void	outb(u_int port, u_char data);
699void	outl(u_int port, u_int data);
700void	outsb(u_int port, const void *addr, size_t count);
701void	outsl(u_int port, const void *addr, size_t count);
702void	outsw(u_int port, const void *addr, size_t count);
703void	outw(u_int port, u_short data);
704u_long	rcr0(void);
705u_long	rcr2(void);
706u_long	rcr3(void);
707u_long	rcr4(void);
708u_int64_t rdmsr(u_int msr);
709u_int64_t rdpmc(u_int pmc);
710u_int64_t rdr0(void);
711u_int64_t rdr1(void);
712u_int64_t rdr2(void);
713u_int64_t rdr3(void);
714u_int64_t rdr4(void);
715u_int64_t rdr5(void);
716u_int64_t rdr6(void);
717u_int64_t rdr7(void);
718u_int64_t rdtsc(void);
719u_int	read_rflags(void);
720u_int	rfs(void);
721u_int	rgs(void);
722void	wbinvd(void);
723void	write_rflags(u_int rf);
724void	wrmsr(u_int msr, u_int64_t newval);
725
726#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */
727
/* Reset the hardware debug registers; implemented out of line. */
void	reset_dbregs(void);

#ifdef _KERNEL
/*
 * Fault-tolerant MSR access: return non-zero on failure instead of
 * faulting.  NOTE(review): implemented elsewhere in the kernel —
 * confirm exact error semantics against the definitions.
 */
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif
734
735#endif /* !_MACHINE_CPUFUNC_H_ */
736