/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>

#include <machine/psl.h>
#include <machine/spr.h>

struct thread;

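/*
 * With KDB compiled in, breakpoint() traps into the kernel debugger;
 * otherwise it is a no-op so callers need not be conditionalized.
 */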
#ifdef KDB
void breakpoint(void);
#else
static __inline void
breakpoint(void)
{

	return;
}
#endif

/* CPU register mangling inlines */

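/*
 * Machine State Register accessors.  The mtmsr/mtmsrd writes are followed
 * by isync, a context-synchronizing instruction, so the new MSR contents
 * take effect before any subsequent instruction executes.
 */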
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}

#ifdef __powerpc64__
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif

static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}

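/*
 * 32-bit only: segment register access for the classic segmented MMU.
 * mtsrin/mfsrin select the segment register from the high-order bits of
 * the effective address passed in.
 */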
#ifndef __powerpc64__
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1; isync" :: "r"(value), "r"(va));
}

static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
#endif

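/* Read the CTRL register; SPR 136 is its read-only encoding. */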
static __inline register_t
mfctrl(void)
{
	register_t value;

	__asm __volatile ("mfspr %0,136" : "=r"(value));

	return (value);
}

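/*
 * Decrementer access.  The decrementer counts down at the time-base
 * frequency and raises an interrupt when it passes zero.
 */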
static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}

static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}

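/* Read the Processor Version Register (PVR). */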
static __inline register_t
mfpvr(void)
{
	register_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}

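/*
 * Read the 64-bit time base.  On 32-bit CPUs it is read as two halves;
 * the loop retries if the upper half rolled over between the two reads.
 * The upper word is stored first, matching the big-endian layout of
 * u_quad_t on 32-bit powerpc.
 */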
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
      #ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
      #else
	uint32_t *tbup = (uint32_t *)&tb;
	uint32_t *tblp = tbup + 1;

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
      #endif

	return (tb);
}

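/*
 * Write the time base.  TBL is cleared first so that a carry out of the
 * low word cannot disturb the value just written to TBU.
 */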
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}

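/*
 * Memory ordering primitives: eieio orders accesses to caching-inhibited
 * (device) memory, powerpc_sync (sync) is the full storage barrier, and
 * isync discards prefetched instructions so that preceding context
 * changes are visible to subsequent code.
 */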
static __inline void
eieio(void)
{

	__asm __volatile ("eieio" : : : "memory");
}

static __inline void
isync(void)
{

	__asm __volatile ("isync" : : : "memory");
}

static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync" : : : "memory");
}

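/*
 * 64-bit count-leading/trailing-zeros.  The instructions are emitted as
 * raw opcodes (.long) with the register numbers patched in, presumably so
 * that the file assembles even with toolchains that do not recognize
 * these mnemonics.
 */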
static __inline int
cntlzd(uint64_t word)
{
	uint64_t result;
	/* cntlzd %0, %1 */
	__asm __volatile(".long 0x7c000074 |  (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}

static __inline int
cnttzd(uint64_t word)
{
	uint64_t result;
	/* cnttzd %0, %1 */
	__asm __volatile(".long 0x7c000474 |  (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}

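/*
 * ptesync orders page-table updates with respect to subsequent storage
 * accesses and TLB-management instructions.
 */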
static __inline void
ptesync(void)
{
	__asm __volatile("ptesync");
}

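/*
 * Mask and restore external interrupts by toggling PSL_EE in the MSR.
 * Typical usage (illustrative only):
 *
 *	register_t msr;
 *
 *	msr = intr_disable();
 *	... code that must not take external interrupts ...
 *	intr_restore(msr);
 */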
static __inline register_t
intr_disable(void)
{
	register_t msr;

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	return (msr);
}

static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}

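/* The per-CPU data pointer lives in SPRG0; mfsprg retrieves it. */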
static __inline struct pcpu *
get_pcpu(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}

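/*
 * fls()/flsl() return the 1-based index of the most significant set bit,
 * or 0 if the argument is 0, built on the compiler's count-leading-zeros
 * builtins.
 */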
#define	HAVE_INLINE_FLS
static __inline __pure2 int
fls(int mask)
{
	return (mask ? 32 - __builtin_clz(mask) : 0);
}

#define	HAVE_INLINE_FLSL
static __inline __pure2 int
flsl(long mask)
{
	return (mask ? (8 * sizeof(long) - __builtin_clzl(mask)) : 0);
}

/* "NOP" operations that hint the hardware thread's priority to the processor. */
static __inline void
nop_prio_vlow(void)
{
	__asm __volatile("or 31,31,31");
}

static __inline void
nop_prio_low(void)
{
	__asm __volatile("or 1,1,1");
}

static __inline void
nop_prio_mlow(void)
{
	__asm __volatile("or 6,6,6");
}

static __inline void
nop_prio_medium(void)
{
	__asm __volatile("or 2,2,2");
}

static __inline void
nop_prio_mhigh(void)
{
	__asm __volatile("or 5,5,5");
}

static __inline void
nop_prio_high(void)
{
	__asm __volatile("or 3,3,3");
}

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */