/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 *
 * Copyright 2019 Joyent, Inc.
 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2023 Oxide Computer Co.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>

#include <sys/segments.h>
#include "assym.h"

/*
 *	Our assumptions:
 *		- We are running in real mode.
 *		- Interrupts are disabled.
 *		- Selectors are equal (cs == ds == ss) for all real mode code.
 *		- The GDT, IDT, ktss and page directory have been built for us.
 *
 *	Our actions:
 *	Start CPU:
 *		- We start using our GDT by loading correct values in the
 *		  selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
 *		  gs=KGS_SEL).
 *		- We change over to using our IDT.
 *		- We load the default LDT into the hardware LDT register.
 *		- We load the default TSS into the hardware task register.
 *		- We call mp_startup(void) indirectly through the current
 *		  thread's T_PC.
 *	Stop CPU:
 *		- Put the CPU into a halted state with interrupts disabled.
 *
 */
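
/*
 * Note for the reader: this code is copied into the "real mode platter",
 * a page of memory below 1MB described by rm_platter_t (sys/rm_platter.h)
 * and set up by the boot CPU.  The *OFF symbols used below are
 * assym-generated offsets of rm_platter_t fields; because the real mode
 * %cs and %ds are based at the start of the platter, those offsets can
 * be used directly as 16-bit addresses.
 */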

	ENTRY_NP(real_mode_start_cpu)

	/*
	 * NOTE:  The GNU assembler automatically does the right thing to
	 *	  generate data size operand prefixes based on the code size
	 *	  generation mode (e.g. .code16, .code32, .code64) and as such
	 *	  prefixes need not be used on instructions EXCEPT in the case
	 *	  of address prefixes for code for which the reference is not
	 *	  automatically of the default operand size.
	 */
	.code16
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Setting %esp to a known value helps in debugging by giving us the
	 * fault address.
	 *
	 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
	 */
	movl		$0xffc, %esp
	movl		%cr0, %eax

	/*
	 * Enable protected-mode, write protect, and alignment mask
	 */
	orl		$(CR0_PE|CR0_WP|CR0_AM), %eax
	movl		%eax, %cr0

	/*
	 * Do a jmp immediately after writing to cr0 when enabling protected
	 * mode to clear the real mode prefetch queue (per Intel's docs)
	 */
	jmp		pestart

pestart:
	/*
	 * 16-bit protected mode is now active, so prepare to turn on long
	 * mode.
	 */

	/*
	 * Add any initial cr4 bits
	 */
	movl		%cr4, %eax
	addr32 orl	CR4OFF, %eax

	/*
	 * Enable PAE mode (CR4.PAE)
	 */
	orl		$CR4_PAE, %eax
	movl		%eax, %cr4

	/*
	 * Point cr3 to the 64-bit long mode page tables.
	 *
	 * Note that these MUST exist in 32-bit space, as we don't have
	 * a way to load %cr3 with a 64-bit base address for the page tables
	 * until the CPU is actually executing in 64-bit long mode.
	 */
	addr32 movl	CR3OFF, %eax
	movl		%eax, %cr3

	/*
	 * Set long mode enable in EFER (EFER.LME = 1)
	 */
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_LME, %eax
	wrmsr

	/*
	 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
	 */
	movl	%cr0, %eax
	orl	$CR0_PG, %eax
	movl	%eax, %cr0

	/*
	 * The instruction after enabling paging in CR0 MUST be a branch.
	 */
	jmp	long_mode_active

long_mode_active:
	/*
	 * Long mode is now active, but since we're still running with the
	 * original 16-bit CS we're actually in 16-bit compatibility mode.
	 *
	 * We have to load an intermediate GDT and IDT here that we know are
	 * in 32-bit space before we can use the kernel's GDT and IDT, which
	 * may be in the 64-bit address space; since we're in compatibility
	 * mode, we only have access to 16- and 32-bit instructions at the
	 * moment.
	 */
	addr32 lgdtl	TEMPGDTOFF	/* load temporary GDT */
	addr32 lidtl	TEMPIDTOFF	/* load temporary IDT */

	/*
	 * Do a far transfer to 64-bit mode.  Set the CS selector to a 64-bit
	 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
	 * to the real mode platter address of long_mode_64; until the 64-bit
	 * CS is in place we don't have access to 64-bit instructions and thus
	 * can't reference a 64-bit %rip.
	 */
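	/*
	 * The transfer is done with a far return rather than a far jump
	 * because the platter's address is only known at run time: we push
	 * the new %cs and %eip by hand and let lretl pop them.
	 */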
	pushl		$TEMP_CS64_SEL
	addr32 pushl	LM64OFF
	lretl

	.globl	long_mode_64
long_mode_64:
	.code64
	/*
	 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
	 * CS.L=1) so we now have access to 64-bit instructions.
	 *
	 * First, set the 64-bit GDT base.
	 */
	.globl	rm_platter_pa
	movl	rm_platter_pa, %eax
	lgdtq	GDTROFF(%rax)		/* load 64-bit GDT */

	/*
	 * Save the CPU number in %r11; we fetch it now because it is stored
	 * in the real mode platter.
	 */
	movl	CPUNOFF(%rax), %r11d

	/*
	 * Add rm_platter_pa to %rsp to point it to the same location as seen
	 * from 64-bit mode.
	 */
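	/*
	 * (%rsp still holds the platter-relative 0xffc set up back in real
	 * mode, so the sum is the physical address of the top of the
	 * platter page.)
	 */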
	addq	%rax, %rsp

	/*
	 * Now do an lretq to load CS with the appropriate selector for the
	 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
	 * virtual address where boot originally loaded this code, rather than
	 * the copy in the real mode platter's rm_code array that we've been
	 * executing so far.
	 */
	pushq	$KCS_SEL
	pushq	$kernel_cs_code
	lretq
	.globl real_mode_start_cpu_end
real_mode_start_cpu_end:
	nop

kernel_cs_code:
	/*
	 * Complete the balance of the setup we need to do before executing
	 * 64-bit kernel code (namely init rsp, the IDT, TSS, FS and GS).
	 */
	.globl	rm_platter_va
	movq	rm_platter_va, %rax
	lidtq	IDTROFF(%rax)

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss

	movw	$KTSS_SEL, %ax		/* setup kernel TSS */
	ltr	%ax

	xorw	%ax, %ax		/* clear LDTR */
	lldt	%ax

	/*
	 * Set GS to the address of the per-cpu structure as contained in
	 * cpu[cpu_number].
	 *
	 * Unfortunately there's no way to set the 64-bit gsbase with a mov,
	 * so we have to stuff the low 32 bits in %eax and the high 32 bits in
	 * %edx, then use wrmsr.
	 */
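	/*
	 * cpu is an array of 64-bit cpu_t pointers, and %r11 still holds the
	 * CPU number fetched above, so index by it with a scale of 8 and
	 * split the pointer across %edx:%eax.
	 */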
	leaq	cpu(%rip), %rdi
	movl	(%rdi, %r11, 8), %eax
	movl	4(%rdi, %r11, 8), %edx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr

	/*
	 * Init FS and KernelGSBase.
	 *
	 * Based on code in mlsetup(), set them both to 8G (0x200000000, which
	 * shouldn't be valid until some 64-bit processes run); this will then
	 * cause an exception in any code that tries to index off them before
	 * they are properly set up.
	 */
	xorl	%eax, %eax		/* low 32 bits = 0 */
	movl	$2, %edx		/* high 32 bits = 2 */
	movl	$MSR_AMD_FSBASE, %ecx
	wrmsr

	movl	$MSR_AMD_KGSBASE, %ecx
	wrmsr

	/*
	 * Init %rsp to the exception stack set in tss_ist1 and create a legal
	 * AMD64 ABI stack frame
	 */
	movq	%gs:CPU_TSS, %rax
	movq	TSS_IST1(%rax), %rsp
	pushq	$0		/* null return address */
	pushq	$0		/* null frame pointer terminates stack trace */
	movq	%rsp, %rbp	/* stack aligned on 16-byte boundary */

	/*
	 * Get %cr0 into the state we (mostly) want, including turning on the
	 * caches.
	 */
	movq	%cr0, %rax
	andq    $~(CR0_CD|CR0_NW|CR0_TS|CR0_EM), %rax
	orq     $(CR0_MP|CR0_NE), %rax
	movq    %rax, %cr0		/* set machine status word */

	/*
	 * Before going any further, enable usage of the page table NX bit if
	 * that's how our page tables are set up.
	 */
	btl	$X86FSET_NX, x86_featureset(%rip)
	jnc	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	/*
	 * Complete the rest of the setup and call mp_startup().
	 */
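	/*
	 * The call goes through the INDIRECT_CALL_REG() macro so that it
	 * stays safe under the kernel's indirect branch (retpoline)
	 * mitigations.
	 */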
	movq	%gs:CPU_THREAD, %rax	/* get thread ptr */
	movq	T_PC(%rax), %rax
	INDIRECT_CALL_REG(rax)		/* call mp_startup_boot */
	/* not reached */
	int	$20			/* whoops, returned somehow! */

	SET_SIZE(real_mode_start_cpu)

	ENTRY_NP(real_mode_stop_cpu_stage1)

	/*
	 * NOTE:  The GNU assembler automatically does the right thing to
	 *	  generate data size operand prefixes based on the code size
	 *	  generation mode (e.g. .code16, .code32, .code64) and as such
	 *	  prefixes need not be used on instructions EXCEPT in the case
	 *	  of address prefixes for code for which the reference is not
	 *	  automatically of the default operand size.
	 */
	.code16
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Jump to the stage 2 code in rm_platter_va->rm_cpu_halt_code.
	 */
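	/*
	 * CPUHALTCODEOFF is the offset of rm_cpu_halt_code within the
	 * platter; since %cs is based at the platter, a near indirect
	 * jump through %ax lands on the stage 2 copy.
	 */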
	movw		$CPUHALTCODEOFF, %ax
	jmp		*%ax

	.globl real_mode_stop_cpu_stage1_end
real_mode_stop_cpu_stage1_end:
	nop

	SET_SIZE(real_mode_stop_cpu_stage1)

	ENTRY_NP(real_mode_stop_cpu_stage2)

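	/*
	 * Tell the CPU that asked us to stop that we have reached the halt
	 * code by writing a sentinel value into the platter's halted flag.
	 */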
	movw		$0xdead, %ax
	movw		%ax, CPUHALTEDOFF

real_mode_stop_cpu_loop:
	/*
	 * Put the CPU into the halted state.
	 * Only INIT, SMI or NMI can break the loop.
	 */
	hlt
	jmp		real_mode_stop_cpu_loop

	.globl real_mode_stop_cpu_stage2_end
real_mode_stop_cpu_stage2_end:
	nop

	SET_SIZE(real_mode_stop_cpu_stage2)