/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2020 Oxide Computer Company
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so it can be searched within the range.
 */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <vmmapi.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range **mmio_hint;
static int mmio_ncpu;

static pthread_rwlock_t mmio_rwlock;

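/*
 * Comparator used by the RB tree: ranges that overlap compare equal.
 * This is also what makes point lookups work, since a key with
 * base == end == addr compares equal to whichever range contains addr.
 */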
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

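/*
 * Find the range containing 'addr', if any. Returns 0 and stores the
 * matching node in '*entry' on success, or ENOENT if no range covers
 * the address.
 */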
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

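/*
 * Insert 'new' into the tree. RB_INSERT() returns an existing node when
 * the comparator finds an overlap, in which case the insertion is
 * rejected with EEXIST.
 */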
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		    "claims region already claimed for '%s'\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end,
		    new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
    void *arg);

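/*
 * Thin adapters that dispatch a single read or write to a range's
 * handler; used below as access_memory() callbacks.
 */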
static int
mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
	    mr->arg2);
	return (error);
}

static int
mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
	    mr->arg2);
	return (error);
}

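/*
 * Look up the range covering 'paddr' and invoke 'cb' on it. The lookup
 * consults the per-vCPU hint first, then the primary tree, and finally
 * the fallback tree; ESRCH is returned if no range claims the address.
 */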
static int
access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable, vcpuid;

	vcpuid = vcpu_id(vcpu);
	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpuid] &&
	    paddr >= mmio_hint[vcpuid]->mr_base &&
	    paddr <= mmio_hint[vcpuid]->mr_end) {
		entry = mmio_hint[vcpuid];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpuid] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed to be never removed
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

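/*
 * access_memory() callback used for instruction emulation: dispatches
 * the decoded MMIO access described by 'struct vm_mmio' as either a
 * read or a write.
 */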
static int
emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct vm_mmio *mmio;
	int err = 0;

	mmio = arg;

	if (mmio->read != 0) {
		err = mem_read(vcpu, paddr, &mmio->data, mmio->bytes, mr);
	} else {
		err = mem_write(vcpu, paddr, mmio->data, mmio->bytes, mr);
	}

	return (err);
}

int
emulate_mem(struct vcpu *vcpu, struct vm_mmio *mmio)
{
	return (access_memory(vcpu, mmio->gpa, emulate_mem_cb, mmio));
}

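/*
 * read_mem() and write_mem() provide direct access to a registered
 * range. The arguments are bundled into 'struct rw_mem_args' so that
 * rw_mem_cb() can forward them through access_memory().
 */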
struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

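/*
 * Allocate a tree node for the range described by 'memp' and insert it
 * into 'rbt'. Registration fails with EEXIST if an overlapping range is
 * already present, or ENOMEM if the node cannot be allocated.
 */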
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp\n",
		    __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
#ifndef __FreeBSD__
		else /* smatch warn: possible memory leak of 'mrp' */
			free(mrp);
#endif
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

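/*
 * Illustrative only: a minimal sketch of how a device model would
 * register an MMIO range with this code, assuming the mem_range layout
 * and handler signature declared in mem.h. The handler name, base
 * address, and size below are hypothetical.
 *
 *	static int
 *	foo_mmio_handler(struct vcpu *vcpu, int dir, uint64_t addr,
 *	    int size, uint64_t *val, void *arg1, long arg2)
 *	{
 *		if (dir == MEM_F_READ)
 *			*val = 0;	// device-specific read
 *		return (0);		// device-specific write omitted
 *	}
 *
 *	struct mem_range mr = {
 *		.name = "foo-mmio",
 *		.flags = MEM_F_RW,
 *		.handler = foo_mmio_handler,
 *		.base = 0xc0000000UL,
 *		.size = 0x1000,
 *	};
 *	error = register_mem(&mr);	// EEXIST if the range overlaps
 */

/*
 * Remove a previously registered (non-immutable) range from the primary
 * tree and invalidate any per-vCPU hints that still point at it.
 */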
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush Per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

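/*
 * One-time initialization: size the per-vCPU hint array for 'ncpu'
 * vCPUs and set up the trees and the rwlock protecting them.
 */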
void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}