/*-
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2013 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 MNX Cloud, Inc.
 */

#include <sys/asm_linkage.h>
#include <sys/segments.h>

/* Porting note: This is named 'vmx_support.S' upstream. */

#include "vmx_assym.h"
#include "vmcs.h"

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state.  The two
 * exceptions are %rip and %rsp.  These registers are atomically switched
 * by hardware from the guest-state area of the VMCS.
 *
 * If 'vmlaunch' or 'vmresume' fails, execution falls through with host
 * state (including %rsp) intact, so the error paths can recover the
 * 'vmxctx' pointer from the vmx_enter_guest stack frame.
 */
/* BEGIN CSTYLED */
#define	VMX_GUEST_RESTORE						\
	/* %cr2 is not loaded from the VMCS; restore it by hand */	\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

#define	VMX_GUEST_SAVE							\
	movq	%rdi, VMXSTK_TMPRDI(%rsp);	/* stash guest %rdi */	\
	movq	VMXSTK_RDI(%rsp), %rdi;		/* reload 'vmxctx' */	\
	movq	%rbp, VMXCTX_GUEST_RBP(%rdi);				\
	leaq	VMXSTK_FP(%rsp), %rbp;	/* restore frame pointer */	\
	movq	%rsi, VMXCTX_GUEST_RSI(%rdi);				\
	movq	%rdx, VMXCTX_GUEST_RDX(%rdi);				\
	movq	%rcx, VMXCTX_GUEST_RCX(%rdi);				\
	movq	%r8, VMXCTX_GUEST_R8(%rdi);				\
	movq	%r9, VMXCTX_GUEST_R9(%rdi);				\
	movq	%rax, VMXCTX_GUEST_RAX(%rdi);				\
	movq	%rbx, VMXCTX_GUEST_RBX(%rdi);				\
	movq	%r10, VMXCTX_GUEST_R10(%rdi);				\
	movq	%r11, VMXCTX_GUEST_R11(%rdi);				\
	movq	%r12, VMXCTX_GUEST_R12(%rdi);				\
	movq	%r13, VMXCTX_GUEST_R13(%rdi);				\
	movq	%r14, VMXCTX_GUEST_R14(%rdi);				\
	movq	%r15, VMXCTX_GUEST_R15(%rdi);				\
	movq	%cr2, %rbx;	/* %cr2 is not saved by hardware */	\
	movq	%rbx, VMXCTX_GUEST_CR2(%rdi);				\
	movq	VMXSTK_TMPRDI(%rsp), %rdx;	/* guest %rdi ... */	\
	movq	%rdx, VMXCTX_GUEST_RDI(%rdi);	/* ... into 'vmxctx' */
/* END CSTYLED */


/*
 * Zero the caller-saved ("scratch") registers so that lingering guest
 * values cannot feed Spectre v1 gadgets in host code after we return
 * from guest context.  %rax is not cleared because it carries the
 * function's return value; the callee-saved registers are restored
 * from host values anyway.
 */
#define	VMX_GUEST_FLUSH_SCRATCH						\
	xorl	%edi, %edi;						\
	xorl	%esi, %esi;						\
	xorl	%edx, %edx;						\
	xorl	%ecx, %ecx;						\
	xorl	%r8d, %r8d;						\
	xorl	%r9d, %r9d;						\
	xorl	%r10d, %r10d;						\
	xorl	%r11d, %r11d;


/* Stack layout (offset from %rsp) for vmx_enter_guest */
#define	VMXSTK_TMPRDI	0x00	/* temp store %rdi on vmexit		*/
#define	VMXSTK_R15	0x08	/* callee saved %r15			*/
#define	VMXSTK_R14	0x10	/* callee saved %r14			*/
#define	VMXSTK_R13	0x18	/* callee saved %r13			*/
#define	VMXSTK_R12	0x20	/* callee saved %r12			*/
#define	VMXSTK_RBX	0x28	/* callee saved %rbx			*/
#define	VMXSTK_RDX	0x30	/* save-args %rdx (int launched)	*/
#define	VMXSTK_RSI	0x38	/* save-args %rsi (struct vmx *vmx)	*/
#define	VMXSTK_RDI	0x40	/* save-args %rdi (struct vmxctx *ctx)	*/
#define	VMXSTK_FP	0x48	/* frame pointer %rbp			*/
#define	VMXSTKSIZE	VMXSTK_FP
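
/*
 * After the prologue in vmx_enter_guest (pushq %rbp; subq $VMXSTKSIZE,
 * %rsp), the frame laid out by the defines above looks like this,
 * offsets relative to the new %rsp:
 *
 *	0x50	return address (pushed by the caller's 'call')
 *	0x48	saved %rbp			VMXSTK_FP
 *	0x40	%rdi: struct vmxctx *ctx	VMXSTK_RDI
 *	0x38	%rsi: struct vmx *vmx		VMXSTK_RSI
 *	0x30	%rdx: int launched		VMXSTK_RDX
 *	0x28	saved %rbx			VMXSTK_RBX
 *	0x20	saved %r12			VMXSTK_R12
 *	0x18	saved %r13			VMXSTK_R13
 *	0x10	saved %r14			VMXSTK_R14
 *	0x08	saved %r15			VMXSTK_R15
 *	0x00	%rdi scratch slot on vmexit	VMXSTK_TMPRDI
 */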

/*
 * vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched)
 * Interrupts must be disabled on entry.
 */
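/*
 * For orientation, a minimal sketch of the C-side call pattern.  The
 * real caller lives in vmx.c; the control flow below is illustrative
 * only, not the actual implementation:
 *
 *	int rc;
 *
 *	// interrupts already disabled
 *	rc = vmx_enter_guest(vmxctx, vmx, launched);
 *	switch (rc) {
 *	case VMX_GUEST_VMEXIT:
 *		// normal exit via vmx_exit_guest; details are in the VMCS
 *		break;
 *	case VMX_VMWRITE_ERROR:
 *	case VMX_VMLAUNCH_ERROR:
 *	case VMX_VMRESUME_ERROR:
 *		// entry failed; 'vmxctx' records VM_FAIL_VALID or
 *		// VM_FAIL_INVALID (see inst_error below)
 *		break;
 *	}
 */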
ENTRY_NP(vmx_enter_guest)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$VMXSTKSIZE, %rsp
	movq	%r15, VMXSTK_R15(%rsp)
	movq	%r14, VMXSTK_R14(%rsp)
	movq	%r13, VMXSTK_R13(%rsp)
	movq	%r12, VMXSTK_R12(%rsp)
	movq	%rbx, VMXSTK_RBX(%rsp)
	movq	%rdx, VMXSTK_RDX(%rsp)
	movq	%rsi, VMXSTK_RSI(%rsp)
	movq	%rdi, VMXSTK_RDI(%rsp)

	movq	%rdi, %r12	/* vmxctx */
	movq	%rsi, %r13	/* vmx */
	movl	%edx, %r14d	/* launch state */

	/* Write the current %rsp into the VMCS to be restored on vmexit */
	movl	$VMCS_HOST_RSP, %eax
	vmwrite	%rsp, %rax
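	/*
	 * VMX instructions report failure through RFLAGS: CF set means
	 * VMfailInvalid, ZF set means VMfailValid.  'jbe' branches when
	 * either flag is set, catching both cases.
	 */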
	jbe	vmwrite_error

	/* Check if vmresume is adequate or a full vmlaunch is required */
	cmpl	$0, %r14d
	je	do_launch

	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case, 'vmresume' returns to the host through
	 * 'vmx_exit_guest'.  If there is an error, we return
	 * VMX_VMRESUME_ERROR to the caller.
	 */
	leaq	VMXSTK_FP(%rsp), %rbp
	movq	VMXSTK_RDI(%rsp), %rdi
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case, 'vmlaunch' returns to the host through
	 * 'vmx_exit_guest'.  If there is an error, we return
	 * VMX_VMLAUNCH_ERROR to the caller.
	 */
	leaq	VMXSTK_FP(%rsp), %rbp
	movq	VMXSTK_RDI(%rsp), %rdi
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

vmwrite_error:
	movl	$VMX_VMWRITE_ERROR, %eax
	jmp	decode_inst_error
decode_inst_error:
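	/*
	 * Nothing between the failed VMX instruction and the 'jz' below
	 * modifies RFLAGS, so ZF still distinguishes the failure modes:
	 * set means VMfailValid (an error number is in the VMCS
	 * VM-instruction error field), clear means VMfailInvalid.
	 */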
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	movq	VMXSTK_RBX(%rsp), %rbx
	movq	VMXSTK_R12(%rsp), %r12
	movq	VMXSTK_R13(%rsp), %r13
	movq	VMXSTK_R14(%rsp), %r14
	movq	VMXSTK_R15(%rsp), %r15

	VMX_GUEST_FLUSH_SCRATCH

	addq	$VMXSTKSIZE, %rsp
	popq	%rbp
	ret
SET_SIZE(vmx_enter_guest)

/*
 * Non-error VM-exit from the guest.  This is a global entry point, not
 * just a label, so that C code can take its address when setting up the
 * VMCS host-state %rip.  The VMCS-restored %rsp points at the stack
 * frame established by vmx_enter_guest; the 'struct vmxctx' pointer is
 * reloaded from that frame.
 */
.align	ASM_ENTRY_ALIGN;
ENTRY_NP(vmx_exit_guest)
	/* Save guest state that is not automatically saved in the VMCS. */
	VMX_GUEST_SAVE

	VMX_GUEST_FLUSH_SCRATCH

	/*
	 * To prevent malicious branch target predictions from affecting the
	 * host, overwrite all entries in the RSB upon exiting a guest.
	 *
	 * NOTE: If RSB mitigations are disabled (see cpuid.c), this call is
	 * entirely a NOP.
	 */
	call	x86_rsb_stuff

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	movq	VMXSTK_RBX(%rsp), %rbx
	movq	VMXSTK_R12(%rsp), %r12
	movq	VMXSTK_R13(%rsp), %r13
	movq	VMXSTK_R14(%rsp), %r14
	movq	VMXSTK_R15(%rsp), %r15

	addq	$VMXSTKSIZE, %rsp
	popq	%rbp
	ret
SET_SIZE(vmx_exit_guest)

/*
 * %rdi = trapno
 *
 * We build just enough of an interrupt stack frame to convince cmnint -
 * and its iretq tail - that it was entered via a legitimate interrupt.
 */
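
/*
 * From highest address down, the frame pushed below mimics what the CPU
 * delivers for an interrupt that supplies an error code, plus the
 * software-pushed trap number that the common interrupt entry expects:
 *
 *	%ss, %rsp, %rflags, %cs, %rip, err, trapno
 *
 * cmnint's iretq consumes the hardware portion and resumes execution at
 * .iret_dest.
 */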
ENTRY_NP(vmx_call_isr)
	pushq	%rbp
	movq	%rsp, %rbp
	movq	%rsp, %r11
	andq	$~0xf, %rsp	/* align stack */
	pushq	$KDS_SEL	/* %ss */
	pushq	%r11		/* %rsp */
	pushfq			/* %rflags */
	pushq	$KCS_SEL	/* %cs */
	leaq	.iret_dest(%rip), %rcx
	pushq	%rcx		/* %rip */
	pushq	$0		/* err */
	pushq	%rdi		/* trapno */
	cli			/* interrupt gates clear IF; mimic that */
	jmp	cmnint		/* enter the common interrupt handler */
.iret_dest:
	popq	%rbp
	ret
SET_SIZE(vmx_call_isr)
