/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this header.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
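
/*
 * Illustrative sketch (not part of the original header): the read*()/
 * write*() macros perform volatile loads and stores, typically against
 * a device register that has already been mapped into kernel virtual
 * address space.  The mapping "va" below is a hypothetical example.
 *
 *	volatile void *va = ...;		// e.g. from pmap_mapdev()
 *	uint32_t status = readl(va);		// 32-bit volatile load
 *	writel(va, status | 0x1);		// 32-bit volatile store
 */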

#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}
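
/*
 * Illustrative sketch (assumption, not from the original header): after
 * updating a buffer that must reach memory (for example a descriptor
 * read by a device, or persistent memory), each dirty cache line can be
 * written back with clwb()/clflushopt() and the flushes ordered with
 * sfence().  "buf" and "len" are hypothetical; 64 is the usual x86
 * cache line size.
 *
 *	char *p = buf;
 *	for (size_t off = 0; off < len; off += 64)
 *		clwb((u_long)(p + off));	// write back, keep line cached
 *	sfence();				// order the writebacks
 */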

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
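
/*
 * Illustrative sketch (not from the original header): do_cpuid() and
 * cpuid_count() fill p[0..3] with %eax, %ebx, %ecx and %edx.  For
 * example, leaf 0 returns the maximum basic leaf in %eax and the
 * vendor string in %ebx, %edx, %ecx:
 *
 *	u_int regs[4];
 *	char vendor[13];
 *
 *	do_cpuid(0, regs);
 *	memcpy(vendor + 0, &regs[1], 4);	// %ebx: "Genu"
 *	memcpy(vendor + 4, &regs[3], 4);	// %edx: "ineI"
 *	memcpy(vendor + 8, &regs[2], 4);	// %ecx: "ntel"
 *	vendor[12] = '\0';
 */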

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{
	return (ffsl((long)mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	return (flsl((long)mask));
}
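
/*
 * Illustrative note (not from the original header): these follow the
 * traditional ffs()/fls() contract, with bits numbered from 1 and a
 * return value of 0 meaning "no bit set":
 *
 *	ffs(0) == 0,  ffs(1) == 1,  ffs(0x80000000) == 32
 *	fls(0) == 0,  fls(1) == 1,  fls(0x80000000) == 32
 */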

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
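
/*
 * Illustrative sketch (assumption, not from the original header):
 * inb()/outb() issue single-byte x86 port I/O.  A classic use is
 * reading a CMOS/RTC register through the index/data pair at ports
 * 0x70 and 0x71:
 *
 *	outb(0x70, 0x00);		// select the RTC seconds register
 *	u_char sec = inb(0x71);		// read its current value
 */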

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}
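
/*
 * Illustrative sketch (assumption, not from the original header):
 * rdtsc() is not ordered with surrounding instructions, so a fence is
 * commonly used when timing a short code sequence.  do_work() is a
 * hypothetical routine being measured.
 *
 *	uint64_t t0, t1, cycles;
 *
 *	lfence();			// keep earlier loads out of the sample
 *	t0 = rdtsc();
 *	do_work();
 *	lfence();			// wait for the work to complete
 *	t1 = rdtsc();
 *	cycles = t1 - t0;
 */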

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
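
/*
 * Illustrative sketch (assumption, not from the original header):
 * rdmsr() returns the 64-bit MSR value assembled from %edx:%eax, and
 * wrmsr() performs the reverse split.  A read-modify-write of a
 * hypothetical MSR number "msr", setting a hypothetical flag "bit":
 *
 *	uint64_t v;
 *
 *	v = rdmsr(msr);
 *	wrmsr(msr, v | bit);
 */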

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}
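
/*
 * Illustrative sketch (assumption, not from the original header):
 * invalidating a single linear address in a given PCID with the
 * INVPCID_ADDR type.  "pcid" and "va" are hypothetical values.
 *
 *	struct invpcid_descr d;
 *
 *	d.pcid = pcid;			// target PCID
 *	d.pad = 0;
 *	d.addr = va;			// linear address to invalidate
 *	invpcid(&d, INVPCID_ADDR);
 */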

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}
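
/*
 * Illustrative sketch (assumption, not from the original header): the
 * usual MONITOR/MWAIT pattern arms a monitor on an address and then
 * waits until that cache line is written or an interrupt arrives.
 * "flag" and "hint" are hypothetical; the hint value selects the
 * target C-state.
 *
 *	cpu_monitor(&flag, 0, 0);	// arm the monitor on &flag
 *	if (flag == 0)			// re-check after arming
 *		cpu_mwait(0, hint);	// wait for a store to &flag
 */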

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
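
/*
 * Illustrative sketch (not from the original header): the usual
 * pattern for a short critical section on the local CPU saves %rflags,
 * disables interrupts, and restores the previous interrupt state
 * afterwards:
 *
 *	register_t saved;
 *
 *	saved = intr_disable();
 *	// ... code that must not be interrupted ...
 *	intr_restore(saved);
 */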

static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

int	breakpoint(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	clflush(u_long addr);
void	clts(void);
void	cpuid_count(u_int ax, u_int cx, u_int *p);
void	disable_intr(void);
void	do_cpuid(u_int ax, u_int *p);
void	enable_intr(void);
void	halt(void);
void	ia32_pause(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t count);
void	insl(u_int port, void *addr, size_t count);
void	insw(u_int port, void *addr, size_t count);
register_t	intr_disable(void);
void	intr_restore(register_t rf);
void	invd(void);
void	invlpg(u_long addr);
void	invltlb(void);
u_short	inw(u_int port);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_dr0(uint64_t dr0);
void	load_dr1(uint64_t dr1);
void	load_dr2(uint64_t dr2);
void	load_dr3(uint64_t dr3);
void	load_dr6(uint64_t dr6);
void	load_dr7(uint64_t dr7);
void	load_fs(u_short sel);
void	load_gs(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t count);
void	outsl(u_int port, const void *addr, size_t count);
void	outsw(u_int port, const void *addr, size_t count);
void	outw(u_int port, u_short data);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
uint64_t rdmsr(u_int msr);
uint32_t rdmsr32(u_int msr);
uint64_t rdpmc(u_int pmc);
uint64_t rdr0(void);
uint64_t rdr1(void);
uint64_t rdr2(void);
uint64_t rdr3(void);
uint64_t rdr6(void);
uint64_t rdr7(void);
uint64_t rdtsc(void);
u_long	read_rflags(void);
u_short	rfs(void);
u_short	rgs(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, uint64_t newval);

#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */