/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>
#include <sys/cpr_wakecode.h>

#include <sys/segments.h>
#include "assym.h"

#ifdef  DEBUG
#define LED     1
#define SERIAL  1
#endif	/*	DEBUG	*/

#ifdef	DEBUG
#define	COM1	0x3f8
#define	COM2	0x2f8
#define	WC_COM	COM2	/* either COM1 or COM2			*/
#define	WC_LED	0x80    /* diagnostic led port ON motherboard	*/

/*
 * defined as offsets from the data register
 */
#define	DLL	0	/* divisor latch (lsb) */
#define	DLH	1	/* divisor latch (msb) */
#define	LCR	3	/* line control register		*/
#define	MCR	4	/* modem control register		*/


#define	DLAB	0x80    /* divisor latch access bit		*/
#define	B9600L	0x0c	/* lsb bit pattern for 9600 baud	*/
#define	B9600H	0x0	/* msb bit pattern for 9600 baud	*/
#define	DTR	0x01    /* Data Terminal Ready			*/
#define	RTS	0x02    /* Request To Send			*/
#define	STOP1	0x00	/* 1 stop bit				*/
#define	BITS8	0x03    /* 8 bits per char			*/

#endif	/*	DEBUG	*/

/*
 *	This file contains the low level routines involved in getting
 *	into and out of ACPI S3, including those needed for restarting
 *	the non-boot cpus.
 *
 *	Our assumptions:
 *
 *	Our actions:
 *
 */

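/*
 * wc_save_context() is called with the address of the per-CPU wakecode
 * save area in %rdi.  It stashes the descriptor tables, control registers,
 * general registers, segment selectors and the FS/GS/KGS base MSRs into
 * that area and returns 1.  The wakeup path (kernel_wc_code below) later
 * restores this state and arranges a second return, of 0, to the same
 * caller, setjmp/longjmp style.
 */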
	ENTRY_NP(wc_save_context)

	movq	(%rsp), %rdx		/ return address
	movq	%rdx, WC_RETADDR(%rdi)
	pushq	%rbp
	movq	%rsp,%rbp

	movq    %rdi, WC_VIRTADDR(%rdi)
	movq    %rdi, WC_RDI(%rdi)

	movq    %rdx, WC_RDX(%rdi)

/ stash everything else we need
	sgdt	WC_GDT(%rdi)
	sidt	WC_IDT(%rdi)
	sldt	WC_LDT(%rdi)
	str	WC_TR(%rdi)

	movq	%cr0, %rdx
	movq	%rdx, WC_CR0(%rdi)
	movq	%cr3, %rdx
	movq	%rdx, WC_CR3(%rdi)
	movq	%cr4, %rdx
	movq	%rdx, WC_CR4(%rdi)
	movq	%cr8, %rdx
	movq	%rdx, WC_CR8(%rdi)

	movq    %r8, WC_R8(%rdi)
	movq    %r9, WC_R9(%rdi)
	movq    %r10, WC_R10(%rdi)
	movq    %r11, WC_R11(%rdi)
	movq    %r12, WC_R12(%rdi)
	movq    %r13, WC_R13(%rdi)
	movq    %r14, WC_R14(%rdi)
	movq    %r15, WC_R15(%rdi)
	movq    %rax, WC_RAX(%rdi)
	movq    %rbp, WC_RBP(%rdi)
	movq    %rbx, WC_RBX(%rdi)
	movq    %rcx, WC_RCX(%rdi)
	movq    %rsi, WC_RSI(%rdi)
	movq    %rsp, WC_RSP(%rdi)

	movw	%ss, WC_SS(%rdi)
	movw	%cs, WC_CS(%rdi)
	movw	%ds, WC_DS(%rdi)
	movw	%es, WC_ES(%rdi)

	movq	$0, %rcx		/ save %fs register
	movw    %fs, %cx
	movq    %rcx, WC_FS(%rdi)

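	/*
	 * In long mode the %fs/%gs selectors alone do not capture the
	 * segment bases; those live in the FSBASE, GSBASE and KGSBASE MSRs,
	 * so they are saved here explicitly and restored with wrmsr at
	 * wakeup.
	 */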
	movl    $MSR_AMD_FSBASE, %ecx
	rdmsr
	movl    %eax, WC_FSBASE(%rdi)
	movl    %edx, WC_FSBASE+4(%rdi)

	movq	$0, %rcx		/ save %gs register
	movw    %gs, %cx
	movq    %rcx, WC_GS(%rdi)

	movl    $MSR_AMD_GSBASE, %ecx	/ save gsbase msr
	rdmsr
	movl    %eax, WC_GSBASE(%rdi)
	movl    %edx, WC_GSBASE+4(%rdi)

	movl    $MSR_AMD_KGSBASE, %ecx	/ save kgsbase msr
	rdmsr
	movl    %eax, WC_KGSBASE(%rdi)
	movl    %edx, WC_KGSBASE+4(%rdi)

	movq	%gs:CPU_ID, %rax	/ save current cpu id
	movq	%rax, WC_CPU_ID(%rdi)

	pushfq
	popq	WC_EFLAGS(%rdi)

	wbinvd				/ flush the cache
	mfence

	movq	$1, %rax		/ at suspend return 1

	leave

	ret

	SET_SIZE(wc_save_context)


/*
 *	Our assumptions:
 *		- We are running in real mode.
 *		- Interrupts are disabled.
 *
 *	Our actions:
 *		- We start using our GDT by loading correct values in the
 *		  selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
 *		  gs=KGS_SEL).
 *		- We change over to using our IDT.
 *		- We load the default LDT into the hardware LDT register.
 *		- We load the default TSS into the hardware task register.
 *		- We restore registers
 *		- We return to original caller (a la setjmp)
 */

	ENTRY_NP(wc_rm_start)

	/*
	 * For the Sun Studio 10 assembler we needed to do a .code32 and
	 * mentally invert the meaning of the addr16 and data16 prefixes to
	 * get 32-bit access when generating code to be executed in 16-bit
	 * mode (sigh...)
	 *
	 * This code, despite always being built with GNU as, has inherited
	 * the conceptual damage.
	 */

	.code32

	cli
	movw		%cs, %ax
	movw		%ax, %ds		/ establish ds ...
	movw		%ax, %ss		/ ... and ss:esp
	D16 movl	$WC_STKSTART, %esp
/ using the following value blows up machines! - DO NOT USE
/	D16 movl	0xffc, %esp


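	/*
	 * In DEBUG kernels, each stage of the wakeup path below announces
	 * itself by writing a code to the diagnostic LED port (0xd1, 0xd2,
	 * ...) and an ASCII character ('a', 'b', ...) to the chosen COM
	 * port, so progress can be followed before the console has resumed.
	 */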
#if     LED
	D16 movl        $WC_LED, %edx
	D16 movb        $0xd1, %al
	outb    (%dx)
#endif

#if     SERIAL
	D16 movl        $WC_COM, %edx
	D16 movb        $0x61, %al
	outb    (%dx)
#endif

	D16 call	cominit

	/*
	 * Enable protected mode, write protect, and alignment mask.
	 * %cr0 has already been initialised to zero.
	 */
	movl		%cr0, %eax
	D16 orl		$_CONST(CR0_PE|CR0_WP|CR0_AM), %eax
	movl		%eax, %cr0

	/*
	 * Do a jmp immediately after writing to cr0 when enabling protected
	 * mode to clear the real mode prefetch queue (per Intel's docs)
	 */
	jmp		pestart
pestart:

#if     LED
	D16 movl        $WC_LED, %edx
	D16 movb        $0xd2, %al
	outb    (%dx)
#endif

#if     SERIAL
	D16 movl        $WC_COM, %edx
	D16 movb        $0x62, %al
	outb    (%dx)
#endif

	/*
	 * 16-bit protected mode is now active, so prepare to turn on long
	 * mode
	 */

#if     LED
	D16 movl        $WC_LED, %edx
	D16 movb        $0xd3, %al
	outb    (%dx)
#endif

#if     SERIAL
	D16 movl        $WC_COM, %edx
	D16 movb        $0x63, %al
	outb    (%dx)
#endif

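	/*
	 * The CR4OFF, CR3OFF, TEMPGDTOFF, TEMPIDTOFF and LM64OFF references
	 * below are offsets into the real mode platter, where the kernel
	 * stashed the values needed to rebuild the 64-bit environment before
	 * suspending.
	 */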
	/*
	 * Add any initial cr4 bits
	 */
	movl		%cr4, %eax
	A16 D16 orl	CR4OFF, %eax

	/*
	 * Enable PAE mode (CR4.PAE)
	 */
	D16 orl		$CR4_PAE, %eax
	movl		%eax, %cr4

#if     LED
	D16 movl        $WC_LED, %edx
	D16 movb        $0xd4, %al
	outb    (%dx)
#endif

#if     SERIAL
	D16 movl        $WC_COM, %edx
	D16 movb        $0x64, %al
	outb    (%dx)
#endif

	/*
	 * Point cr3 to the 64-bit long mode page tables.
	 *
	 * Note that these MUST exist in 32-bit space, as we don't have
	 * a way to load %cr3 with a 64-bit base address for the page tables
	 * until the CPU is actually executing in 64-bit long mode.
	 */
	A16 D16 movl	CR3OFF, %eax
	movl		%eax, %cr3

	/*
	 * Set long mode enable in EFER (EFER.LME = 1)
	 */
	D16 movl	$MSR_AMD_EFER, %ecx
	rdmsr

	D16 orl		$AMD_EFER_LME, %eax
	wrmsr

#if     LED
	D16 movl        $WC_LED, %edx
	D16 movb        $0xd5, %al
	outb    (%dx)
#endif

#if     SERIAL
	D16 movl        $WC_COM, %edx
	D16 movb        $0x65, %al
	outb    (%dx)
#endif

	/*
	 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
	 */
	movl		%cr0, %eax
	D16 orl		$CR0_PG, %eax
	movl		%eax, %cr0

	/*
	 * The instruction after enabling paging in CR0 MUST be a branch.
	 */
	jmp		long_mode_active

long_mode_active:

#if     LED
	D16 movl        $WC_LED, %edx
	D16 movb        $0xd6, %al
	outb    (%dx)
#endif

#if     SERIAL
	D16 movl        $WC_COM, %edx
	D16 movb        $0x66, %al
	outb    (%dx)
#endif

	/*
	 * Long mode is now active but since we're still running with the
	 * original 16-bit CS we're actually in 16-bit compatibility mode.
	 *
	 * We have to load an intermediate GDT and IDT here that we know are
	 * in 32-bit space before we can use the kernel's GDT and IDT, which
	 * may be in the 64-bit address space, and since we're in
	 * compatibility mode, we only have access to 16- and 32-bit
	 * instructions at the moment.
	 */
	A16 D16 lgdt	TEMPGDTOFF	/* load temporary GDT */
	A16 D16 lidt	TEMPIDTOFF	/* load temporary IDT */


	/*
	 * Do a far transfer to 64-bit mode.  Set the CS selector to a 64-bit
	 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
	 * to the real mode platter address of wc_long_mode_64; until the
	 * 64-bit CS is in place we don't have access to 64-bit instructions
	 * and thus can't reference a 64-bit %rip.
	 */

#if     LED
	D16 movl        $WC_LED, %edx
	D16 movb        $0xd7, %al
	outb    (%dx)
#endif

#if     SERIAL
	D16 movl        $WC_COM, %edx
	D16 movb        $0x67, %al
	outb    (%dx)
#endif

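	/*
	 * Push a far pointer (TEMP_CS64_SEL:LM64OFF) and lret through it;
	 * this resumes at wc_long_mode_64 with the 64-bit code selector
	 * from the temporary GDT in force.
	 */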
	D16	pushl	$TEMP_CS64_SEL
	A16 D16 pushl	LM64OFF

	D16 lret


/*
 * Support routine to re-initialize VGA subsystem
 */
vgainit:
	D16 ret

/*
 * Support routine to re-initialize keyboard (which is USB - help!)
 */
kbdinit:
	D16 ret

/*
 * Support routine to re-initialize COM ports to something sane
 */
cominit:
	/ init COM1 & COM2

#if     DEBUG
/*
 * on debug kernels we need to initialize COM1 & COM2 here, so that
 * we can get debug output before the asy driver has resumed
 */

/ select COM1
	D16 movl	$_CONST(COM1+LCR), %edx
	D16 movb	$DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$_CONST(COM1+DLL), %edx	/ divisor latch lsb
	D16 movb	$B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$_CONST(COM1+DLH), %edx	/ divisor latch msb
	D16 movb	$B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$_CONST(COM1+LCR), %edx	/ select COM1
	D16 movb	$_CONST(STOP1|BITS8), %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl	$_CONST(COM1+MCR), %edx	/ select COM1
	D16 movb	$_CONST(RTS|DTR), %al		/ data term ready & req to send
	outb	(%dx)

/ select COM2
	D16 movl	$_CONST(COM2+LCR), %edx
	D16 movb	$DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$_CONST(COM2+DLL), %edx	/ divisor latch lsb
	D16 movb	$B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$_CONST(COM2+DLH), %edx	/ divisor latch msb
	D16 movb	$B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$_CONST(COM2+LCR), %edx	/ select COM2
	D16 movb	$_CONST(STOP1|BITS8), %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl	$_CONST(COM2+MCR), %edx	/ select COM2
	D16 movb	$_CONST(RTS|DTR), %al		/ data term ready & req to send
	outb	(%dx)
#endif	/*	DEBUG	*/

	D16 ret

	.code64

	.globl wc_long_mode_64
wc_long_mode_64:

#if     LED
	movw        $WC_LED, %dx
	movb        $0xd8, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movb        $0x68, %al
	outb    (%dx)
#endif

	/*
	 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
	 * CS.L=1) so we now have access to 64-bit instructions.
	 *
	 * First, set the 64-bit GDT base.
	 */
	.globl	rm_platter_pa
	movl	rm_platter_pa, %eax

	lgdtq	GDTROFF(%rax)		/* load 64-bit GDT */

	/*
	 * Save the CPU number in %r11; get the value here since it's saved in
	 * the real mode platter.
	 */
/ JAN
/ the following is wrong! need to figure out MP systems
/	movl	CPUNOFF(%rax), %r11d

	/*
	 * Add rm_platter_pa to %rsp to point it to the same location as seen
	 * from 64-bit mode.
	 */
	addq	%rax, %rsp

	/*
	 * Now do an lretq to load CS with the appropriate selector for the
	 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
	 * virtual address where boot originally loaded this code, rather than
	 * from the copy in the real mode platter's rm_code array that we've
	 * been executing so far.
	 */

#if     LED
	movw        $WC_LED, %dx
	movb        $0xd9, %al
	outb    (%dx)
#endif

/ JAN this should produce 'i' but we get 'g' instead ???
#if     SERIAL
	movw        $WC_COM, %dx
	movb        $0x69, %al
	outb    (%dx)
#endif

	pushq	$KCS_SEL
	pushq	$kernel_wc_code
	lretq

	.globl kernel_wc_code
kernel_wc_code:

#if     LED
	movw        $WC_LED, %dx
	movb        $0xda, %al
	outb    (%dx)
#endif

/ JAN this should produce 'j' but we get 'g' instead ???
#if     SERIAL
	movw        $WC_COM, %dx
	movb        $0x6a, %al
	outb    (%dx)
#endif

	/*
	 * Complete the balance of the setup we need to do before executing
	 * 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
	 */
	.globl  rm_platter_va
	movq    rm_platter_va, %rbx
	addq	$WC_CPU, %rbx
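	/*
	 * %rbx now points at the saved-context area (offset WC_CPU into the
	 * real mode platter, via its kernel virtual address); all of the
	 * WC_* restores below are relative to it.
	 */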

#if     LED
	movw        $WC_LED, %dx
	movb        $0xdb, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movw        $0x6b, %ax
	outb    (%dx)
#endif

	/*
	 * restore the IDT that was saved at suspend time
	 */

	lidtq	WC_IDT(%rbx)

#if     LED
	movw        $WC_LED, %dx
	movb        $0xdc, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movw        $0x6c, %ax
	outb    (%dx)
#endif

	/*
	 * restore the rest of the registers
	 */

	movw    $KDS_SEL, %ax
	movw    %ax, %ds
	movw    %ax, %es
	movw    %ax, %ss

	/*
	 * Before proceeding, enable usage of the page table NX bit if
	 * that's how the page tables are set up.
	 */
	btl     $X86FSET_NX, x86_featureset(%rip)
	jnc     1f
	movl    $MSR_AMD_EFER, %ecx
	rdmsr
	orl     $AMD_EFER_NXE, %eax
	wrmsr
1:

	movq	WC_CR4(%rbx), %rax	/ restore full cr4 (with Global Enable)
	movq	%rax, %cr4

	lldt	WC_LDT(%rbx)
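	/*
	 * The saved TSS descriptor in the GDT still has its busy bit set
	 * (bit 9 of the second descriptor word).  Loading a busy TSS with
	 * ltr would fault, so clear that bit before reloading %tr.
	 */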
	movzwq	WC_TR(%rbx), %rax	/ clear TSS busy bit
	addq	WC_GDT+2(%rbx), %rax
	andl	$0xfffffdff, 4(%rax)
	movq	4(%rax), %rcx
	ltr	WC_TR(%rbx)

#if     LED
	movw        $WC_LED, %dx
	movb        $0xdd, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movw        $0x6d, %ax
	outb    (%dx)
#endif

/ restore the %fsbase, %gsbase and %kgsbase MSRs using wrmsr

	movq    WC_FS(%rbx), %rcx	/ restore fs register
	movw    %cx, %fs

	movl    $MSR_AMD_FSBASE, %ecx
	movl    WC_FSBASE(%rbx), %eax
	movl    WC_FSBASE+4(%rbx), %edx
	wrmsr

	movq    WC_GS(%rbx), %rcx	/ restore gs register
	movw    %cx, %gs

	movl    $MSR_AMD_GSBASE, %ecx	/ restore gsbase msr
	movl    WC_GSBASE(%rbx), %eax
	movl    WC_GSBASE+4(%rbx), %edx
	wrmsr

	movl    $MSR_AMD_KGSBASE, %ecx	/ restore kgsbase msr
	movl    WC_KGSBASE(%rbx), %eax
	movl    WC_KGSBASE+4(%rbx), %edx
	wrmsr

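	/*
	 * Restore the control registers saved by wc_save_context; loading
	 * %cr3 switches back to the kernel's page tables (%cr4, including
	 * Global Enable, was already restored above).
	 */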
	movq	WC_CR0(%rbx), %rdx
	movq	%rdx, %cr0
	movq	WC_CR3(%rbx), %rdx
	movq	%rdx, %cr3
	movq	WC_CR8(%rbx), %rdx
	movq	%rdx, %cr8

#if     LED
	movw        $WC_LED, %dx
	movb        $0xde, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movb        $0x6e, %al
	outb    (%dx)
#endif

	/*
	 * If we are not running on the boot CPU, restore the stack contents
	 * by calling i_cpr_restore_stack(curthread, save_stack);
	 */
	movq    %rsp, %rbp
	call	i_cpr_bootcpuid
	cmpl	%eax, WC_CPU_ID(%rbx)
	je	2f

	movq	%gs:CPU_THREAD, %rdi
	movq	WC_SAVED_STACK(%rbx), %rsi
	call	i_cpr_restore_stack
2:

	movq    WC_RSP(%rbx), %rsp	/ restore stack pointer

	/*
	 * APIC initialization
	 */
	movq    %rsp, %rbp

	/*
	 * skip if the function pointer is NULL
	 */
	cmpq	$0, ap_mlsetup
	je	3f
	leaq	ap_mlsetup, %rax
	INDIRECT_CALL_REG(rax)
3:

	leaq	cpr_start_cpu_func, %rax
	INDIRECT_CALL_REG(rax)

/ restore %rbx to the value it had before we called the functions above
	movq    rm_platter_va, %rbx
	addq	$WC_CPU, %rbx

	movq    WC_R8(%rbx), %r8
	movq    WC_R9(%rbx), %r9
	movq    WC_R10(%rbx), %r10
	movq    WC_R11(%rbx), %r11
	movq    WC_R12(%rbx), %r12
	movq    WC_R13(%rbx), %r13
	movq    WC_R14(%rbx), %r14
	movq    WC_R15(%rbx), %r15
/	movq    WC_RAX(%rbx), %rax
	movq    WC_RBP(%rbx), %rbp
	movq    WC_RCX(%rbx), %rcx
/	movq    WC_RDX(%rbx), %rdx
	movq    WC_RDI(%rbx), %rdi
	movq    WC_RSI(%rbx), %rsi


/ assume that %cs does not need to be restored
/ %ds, %es & %ss are ignored in 64-bit mode
	movw	WC_SS(%rbx), %ss
	movw	WC_DS(%rbx), %ds
	movw	WC_ES(%rbx), %es

#if     LED
	movw        $WC_LED, %dx
	movb        $0xdf, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movb        $0x6f, %al
	outb    (%dx)
#endif


	movq    WC_RBP(%rbx), %rbp
	movq    WC_RSP(%rbx), %rsp

#if     LED
	movw        $WC_LED, %dx
	movb        $0xe0, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movb        $0x70, %al
	outb    (%dx)
#endif


	movq    WC_RCX(%rbx), %rcx

	pushq	WC_EFLAGS(%rbx)			/ restore flags
	popfq

#if     LED
	movw        $WC_LED, %dx
	movb        $0xe1, %al
	outb    (%dx)
#endif

#if     SERIAL
	movw        $WC_COM, %dx
	movb        $0x71, %al
	outb    (%dx)
#endif

/*
 * cannot use outb after this point, because doing so would clobber %dx,
 * and %rdx is restored below
 */

	movq	%rbx, %rax
	movq    WC_RDX(%rax), %rdx
	movq    WC_RBX(%rax), %rbx

	leave

	movq	WC_RETADDR(%rax), %rax
	movq	%rax, (%rsp)		/ return to caller of wc_save_context

	xorl	%eax, %eax			/ at wakeup return 0
	ret


	SET_SIZE(wc_rm_start)

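/*
 * asmspin(count) is a simple delay helper: it loads the count into %ecx
 * and spins in a loop instruction that many times before returning.
 */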
	ENTRY_NP(asmspin)

	movl	%edi, %ecx
A1:
	loop	A1
	ret

	SET_SIZE(asmspin)

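/*
 * wc_rm_start and wc_rm_end bracket the wakecode that is copied into the
 * real mode platter's rm_code array for execution during wakeup.
 */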
	.globl wc_rm_end
wc_rm_end:
	nop
