xref: /illumos-gate/usr/src/uts/intel/io/vmm/amd/svm_msr.c (revision 4bd36be4)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <sys/vmm_kernel.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

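/*
 * Fallback definition for the AMD "Interrupt Pending Message" MSR in case
 * the system headers do not already provide it.
 */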
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

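/*
 * Indices into the per-vCPU host_msrs[] array for the host MSRs that are
 * saved and restored by hand around guest execution.
 */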
enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};
CTASSERT(HOST_MSR_NUM == SVM_HOST_MSR_NUM);

void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
}

void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	uint64_t *host_msrs = sc->host_msrs[vcpu];

	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
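	/*
	 * The guest copies of the syscall MSRs (STAR, LSTAR, CSTAR, SF_MASK)
	 * are loaded by VMLOAD on the way into the guest, so the host values
	 * must be stashed here and restored in svm_msr_guest_exit().
	 */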
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);

	/*
	 * Set the frequency multiplier MSR to enable guest TSC scaling if
	 * needed.
	 */
	uint64_t mult = vm_get_freq_multiplier(sc->vm);
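	/* The ratio written to the MSR is an 8.32 fixed-point multiplier. */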
	if (mult != VM_TSCM_NOSCALE) {
		wrmsr(MSR_AMD_TSC_RATIO, mult);
	}
}

void
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
	uint64_t *host_msrs = sc->host_msrs[vcpu];

	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* Reset frequency multiplier MSR if any scaling is configured */
	if (vm_get_freq_multiplier(sc->vm) != VM_TSCM_NOSCALE) {
		wrmsr(MSR_AMD_TSC_RATIO, AMD_TSCM_RESET_VAL);
	}

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

vm_msr_result_t
svm_rdmsr(struct svm_softc *sc, int vcpu, uint32_t num, uint64_t *result)
{
	switch (num) {
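	/*
	 * These MSRs are not otherwise virtualized; a benign value of zero
	 * is reported to the guest.
	 */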
	case MSR_SYSCFG:
	case MSR_AMDK8_IPM:
	case MSR_EXTFEATURES:
		*result = 0;
		break;
	case MSR_AMD_DE_CFG:
		*result = 0;
		/*
		 * Bit 1 of DE_CFG is defined by AMD to control whether the
		 * lfence instruction is serializing.  Practically all CPUs
		 * supported by bhyve also contain this MSR, making it safe to
		 * expose unconditionally.
		 */
		if (is_x86_feature(x86_featureset, X86FSET_LFENCE_SER)) {
			*result |= AMD_DE_CFG_LFENCE_DISPATCH;
		}
		break;
	default:
		return (VMR_UNHANLDED);
	}
	return (VMR_OK);
}

vm_msr_result_t
svm_wrmsr(struct svm_softc *sc, int vcpu, uint32_t num, uint64_t val)
{
	switch (num) {
	case MSR_SYSCFG:
		/* Ignore writes */
		break;
	case MSR_AMD_DE_CFG:
		/* Ignore writes */
		break;
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	case MSR_K8_UCODE_UPDATE:
		/*
		 * Ignore writes to microcode update register.
		 */
		break;
	case MSR_EXTFEATURES:
		break;
	default:
		return (VMR_UNHANLDED);
	}

	return (VMR_OK);
}