/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2019 Joyent, Inc.
 */

#ifndef _ASM_CPU_H
#define	_ASM_CPU_H

#include <sys/ccompile.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if !defined(__lint) && defined(__GNUC__)

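/*
 * When building with GCC (and not under lint), the routines below are
 * small inline wrappers around single instructions.  __GNU_INLINE comes
 * from <sys/ccompile.h> and, together with "extern", gives them GNU-style
 * extern inline linkage so their bodies are simply inlined into callers.
 */
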
#if defined(__i386) || defined(__amd64)

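/*
 * Spin-loop hint: PAUSE tells the CPU that this is a busy-wait loop, which
 * reduces power consumption while spinning and avoids the memory-order
 * mis-speculation penalty when the loop finally exits.
 */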
extern __GNU_INLINE void
ht_pause(void)
{
	__asm__ __volatile__(
	    "pause");
}

/*
 * prefetch 64 bytes
 *
 * prefetch is an SSE extension which is not supported on
 * older 32-bit processors, so define this as a no-op for now
 */

extern __GNU_INLINE void
prefetch_read_many(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetcht0 (%0);"
	    "prefetcht0 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

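/*
 * prefetcht0 above pulls the data into all levels of the cache hierarchy;
 * prefetchnta below is the non-temporal hint, which minimizes cache
 * pollution for data that will only be read once.
 */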
extern __GNU_INLINE void
prefetch_read_once(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetchnta (%0);"
	    "prefetchnta 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

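/*
 * The write variants below currently issue the same prefetcht0 sequence
 * as prefetch_read_many; there is no prefetchw here.
 */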
extern __GNU_INLINE void
prefetch_write_many(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetcht0 (%0);"
	    "prefetcht0 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

extern __GNU_INLINE void
prefetch_write_once(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetcht0 (%0);"
	    "prefetcht0 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

#if !defined(__xpv)

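/*
 * Disable maskable interrupts.  The "memory" clobber also acts as a
 * compiler barrier, so memory accesses are not reordered across the cli.
 */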
extern __GNU_INLINE void
cli(void)
{
	__asm__ __volatile__(
	    "cli" : : : "memory");
}

extern __GNU_INLINE void
sti(void)
{
	__asm__ __volatile__(
	    "sti");
}

/*
 * Newer callers of halt should consider calling x86_md_clear() before
 * calling this, to deal with any potential Microarchitectural Data
 * Sampling (MDS) issues.  Because this version of hlt is also used in
 * panic context, we do not unconditionally call x86_md_clear() here;
 * callers must do so themselves.
 */
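/*
 * The "sti; hlt" pair relies on the one-instruction interrupt shadow of
 * STI: interrupts are not recognized until the instruction following the
 * STI, so an interrupt arriving here wakes the HLT rather than being
 * taken (and effectively lost as a wakeup) before it.
 */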
extern __GNU_INLINE void
i86_halt(void)
{
	__asm__ __volatile__(
	    "sti; hlt");
}

#endif /* !__xpv */

#endif	/* __i386 || __amd64 */

#if defined(__amd64)

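/*
 * Load the given selector into the corresponding data segment register
 * (%ds, %es, %fs or %gs).
 */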
extern __GNU_INLINE void
__set_ds(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%ds"
	    : /* no output */
	    : "r" (value));
}

extern __GNU_INLINE void
__set_es(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%es"
	    : /* no output */
	    : "r" (value));
}

extern __GNU_INLINE void
__set_fs(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%fs"
	    : /* no output */
	    : "r" (value));
}

extern __GNU_INLINE void
__set_gs(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%gs"
	    : /* no output */
	    : "r" (value));
}

#endif	/* __amd64 */

#endif	/* !__lint && __GNUC__ */

#ifdef	__cplusplus
}
#endif

#endif	/* _ASM_CPU_H */