/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/asm_linkage.h>

#include "svm_assym.h"

/* Porting note: This is named 'svm_support.S' upstream. */

/*
 * Zero the caller-saved (scratch) registers after returning from the guest
 * so that no lingering guest state can serve as a Spectre v1 gadget in host
 * code.  Writing the 32-bit register forms zero-extends into the full
 * 64-bit registers.
 */
#define	SVM_GUEST_FLUSH_SCRATCH						\
	xorl	%edi, %edi;						\
	xorl	%esi, %esi;						\
	xorl	%edx, %edx;						\
	xorl	%ecx, %ecx;						\
	xorl	%r8d, %r8d;						\
	xorl	%r9d, %r9d;						\
	xorl	%r10d, %r10d;						\
	xorl	%r11d, %r11d;
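/*
 * Note: %rax is deliberately absent from the list above.  By the time the
 * macro is invoked below, it holds a host pointer (the svm_regctx), not
 * guest data.
 */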

/* Stack layout (offset from %rsp) for svm_launch */
#define	SVMSTK_R15	0x00	/* callee saved %r15			*/
#define	SVMSTK_R14	0x08	/* callee saved %r14			*/
#define	SVMSTK_R13	0x10	/* callee saved %r13			*/
#define	SVMSTK_R12	0x18	/* callee saved %r12			*/
#define	SVMSTK_RBX	0x20	/* callee saved %rbx			*/
#define	SVMSTK_RDX	0x28	/* save-args %rdx (struct cpu *)	*/
#define	SVMSTK_RSI	0x30	/* save-args %rsi (struct svm_regctx *)	*/
#define	SVMSTK_RDI	0x38	/* save-args %rdi (uint64_t vmcb_pa)	*/
#define	SVMSTK_FP	0x40	/* frame pointer %rbp			*/
#define	SVMSTKSIZE	SVMSTK_FP
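/*
 * The %rbp pushed in the prologue lands at SVMSTK_FP(%rsp) once SVMSTKSIZE
 * bytes have been reserved, so no explicit store to that slot is needed.
 */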

/*
 * svm_launch(uint64_t vmcb_pa, struct svm_regctx *gctx, struct cpu *cpu)
 * %rdi: physical address of VMCB
 * %rsi: pointer to guest context
 * %rdx: pointer to this CPU's struct cpu
 */
ENTRY_NP(svm_launch)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$SVMSTKSIZE, %rsp
	movq	%r15, SVMSTK_R15(%rsp)
	movq	%r14, SVMSTK_R14(%rsp)
	movq	%r13, SVMSTK_R13(%rsp)
	movq	%r12, SVMSTK_R12(%rsp)
	movq	%rbx, SVMSTK_RBX(%rsp)
	movq	%rdx, SVMSTK_RDX(%rsp)
	movq	%rsi, SVMSTK_RSI(%rsp)
	movq	%rdi, SVMSTK_RDI(%rsp)

	/*
	 * Stash the VMCB physical address in %rax, the implicit operand of
	 * vmload/vmrun/vmsave.  Guest %rax is kept in the VMCB itself, so
	 * %rax stays free across the guest-state restore below.
	 */
	movq	%rdi, %rax

	/* Restore guest state. */
	movq	SCTX_R8(%rsi), %r8
	movq	SCTX_R9(%rsi), %r9
	movq	SCTX_R10(%rsi), %r10
	movq	SCTX_R11(%rsi), %r11
	movq	SCTX_R12(%rsi), %r12
	movq	SCTX_R13(%rsi), %r13
	movq	SCTX_R14(%rsi), %r14
	movq	SCTX_R15(%rsi), %r15
	movq	SCTX_RBP(%rsi), %rbp
	movq	SCTX_RBX(%rsi), %rbx
	movq	SCTX_RCX(%rsi), %rcx
	movq	SCTX_RDX(%rsi), %rdx
	movq	SCTX_RDI(%rsi), %rdi
	movq	SCTX_RSI(%rsi), %rsi	/* %rsi must be restored last */

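	/*
	 * vmload pulls in the guest state that VMRUN itself does not load
	 * (FS/GS bases, TR, LDTR, KernelGSBase, STAR/LSTAR/CSTAR/SFMASK,
	 * and the SYSENTER MSRs) from the VMCB whose physical address is in
	 * %rax.  vmrun then enters the guest and returns here on #VMEXIT,
	 * after which vmsave writes that same hidden state back out.
	 */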
	vmload	%rax
	vmrun	%rax
	vmsave	%rax

	/* Grab the svm_regctx pointer */
	movq	SVMSTK_RSI(%rsp), %rax

	/* Save guest state. */
	movq	%r8, SCTX_R8(%rax)
	movq	%r9, SCTX_R9(%rax)
	movq	%r10, SCTX_R10(%rax)
	movq	%r11, SCTX_R11(%rax)
	movq	%r12, SCTX_R12(%rax)
	movq	%r13, SCTX_R13(%rax)
	movq	%r14, SCTX_R14(%rax)
	movq	%r15, SCTX_R15(%rax)
	movq	%rbp, SCTX_RBP(%rax)
	movq	%rbx, SCTX_RBX(%rax)
	movq	%rcx, SCTX_RCX(%rax)
	movq	%rdx, SCTX_RDX(%rax)
	movq	%rdi, SCTX_RDI(%rax)
	movq	%rsi, SCTX_RSI(%rax)
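	/* Guest %rax was already written back to the VMCB by VMRUN. */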

	/* Restore callee-saved registers */
	movq	SVMSTK_R15(%rsp), %r15
	movq	SVMSTK_R14(%rsp), %r14
	movq	SVMSTK_R13(%rsp), %r13
	movq	SVMSTK_R12(%rsp), %r12
	movq	SVMSTK_RBX(%rsp), %rbx

	/* Fix %gsbase to point back to the correct 'struct cpu *' */
	movq	SVMSTK_RDX(%rsp), %rdx		/* saved cpu pointer */
	movl	%edx, %eax			/* low 32 bits for wrmsr */
	shrq	$32, %rdx			/* high 32 bits for wrmsr */
	movl	$MSR_GSBASE, %ecx
	wrmsr					/* %ecx = MSR, %edx:%eax = value */

	/*
	 * While SVM will save/restore the GDTR and IDTR, the TR does not
	 * enjoy such treatment.  Reload the KTSS immediately, since it is
	 * used by dtrace and other fault/trap handlers.  The descriptor's
	 * busy bit must be cleared first: ltr raises #GP when asked to load
	 * a TSS whose descriptor is already marked busy.
	 */
	movq	SVMSTK_RDX(%rsp), %rdi		/* %rdi = CPU */
	movq	CPU_GDT(%rdi), %rdi		/* %rdi = cpu->cpu_gdt */
	leaq	GDT_KTSS_OFF(%rdi), %rdi	/* %rdi = &cpu_gdt[GDT_KTSS] */
	andb	$0xfd, SSD_TYPE(%rdi)		/* ssd_type.busy = 0 */
	movw	$KTSS_SEL, %ax			/* reload kernel TSS */
	ltr	%ax

	SVM_GUEST_FLUSH_SCRATCH

	addq	$SVMSTKSIZE, %rsp
	popq	%rbp
	ret
SET_SIZE(svm_launch)