/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"
#include "internal.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

#ifndef __FreeBSD__
/* shim to no-op for now */
#define	MAP_NOCORE		0
#define	MAP_ALIGNED_SUPER	0

/* Rely on PROT_NONE for guard purposes */
#define	MAP_GUARD		(MAP_PRIVATE | MAP_ANON | MAP_NORESERVE)

#define	_Thread_local		__thread
#endif

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	int	memflags;
	size_t	lowmem;
	size_t	highmem;
	char	*baseaddr;
	char	*name;
};

#ifdef	__FreeBSD__
#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
#endif

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

#ifdef	__FreeBSD__
int
vm_create(const char *name)
{
	/* Try to load vmm(4) module before creating a guest. */
	if (modfind("vmm") < 0)
		kldload("vmm");
	return (CREATE(name));
}
#else
static int
vm_do_ctl(int cmd, void *req)
{
	int ctl_fd;

	ctl_fd = open(VMM_CTL_DEV, O_EXCL | O_RDWR);
	if (ctl_fd < 0) {
		return (-1);
	}

	if (ioctl(ctl_fd, cmd, req) == -1) {
		int err = errno;

		/* Do not lose ioctl errno through the close(2) */
		(void) close(ctl_fd);
		errno = err;
		return (-1);
	}
	(void) close(ctl_fd);

	return (0);
}

int
vm_create(const char *name, uint64_t flags)
{
	struct vm_create_req req;

	(void) strncpy(req.name, name, VM_MAX_NAMELEN);
	req.flags = flags;

	return (vm_do_ctl(VMM_CREATE_VM, &req));
}
#endif

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;
	int saved_errno;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	saved_errno = errno;
	free(vm);
	errno = saved_errno;
	return (NULL);
}
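
/*
 * Example (illustrative sketch, not part of this library): the typical
 * lifecycle on illumos, where vm_create() takes a flags argument, pairs
 * creation with vm_open() to obtain a context and vm_destroy() to tear
 * the instance down.  The name "demo" is a placeholder.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("demo", 0) != 0)
 *		err(EXIT_FAILURE, "vm_create");
 *	if ((ctx = vm_open("demo")) == NULL)
 *		err(EXIT_FAILURE, "vm_open");
 *	...
 *	vm_destroy(ctx);
 */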

#ifdef	__FreeBSD__
void
vm_close(struct vmctx *vm)
{
	assert(vm != NULL);

	close(vm->fd);
	free(vm);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}
#else
void
vm_close(struct vmctx *vm)
{
	assert(vm != NULL);
	assert(vm->fd >= 0);

	(void) close(vm->fd);

	free(vm);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0) {
		(void) ioctl(vm->fd, VM_DESTROY_SELF, 0);
		(void) close(vm->fd);
		vm->fd = -1;
	}

	free(vm);
}
#endif

struct vcpu *
vm_vcpu_open(struct vmctx *ctx, int vcpuid)
{
	struct vcpu *vcpu;

	vcpu = malloc(sizeof(*vcpu));
#ifndef	__FreeBSD__
	if (vcpu == NULL)
		return (vcpu);
#endif
	vcpu->ctx = ctx;
	vcpu->vcpuid = vcpuid;
	return (vcpu);
}

void
vm_vcpu_close(struct vcpu *vcpu)
{
	free(vcpu);
}

int
vcpu_id(struct vcpu *vcpu)
{
	return (vcpu->vcpuid);
}

struct vmctx *
vcpu_ctx(struct vcpu *vcpu)
{
	return (vcpu->ctx);
}

int
vm_parse_memsize(const char *opt, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(opt, &endptr, 0);
	if (*opt != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(opt, ret_memsize);

	return (error);
}
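
/*
 * Example (illustrative): because values below one megabyte are scaled for
 * backward compatibility, "512" yields 512 MB.  Suffixed values such as
 * "2G" are handed to expand_number() and yield 2 GiB, while a full byte
 * count such as "2147483648" is used as-is.
 */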

uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}

/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
	struct vm_memmap memmap;
	int error, flags;

	memmap.gpa = gpa;
	memmap.segid = segid;
	memmap.segoff = off;
	memmap.len = len;
	memmap.prot = prot;
	memmap.flags = 0;

	if (ctx->memflags & VM_MEM_F_WIRED)
		memmap.flags |= VM_MEMMAP_F_WIRED;

	/*
	 * If this mapping already exists then don't create it again. This
	 * is the common case for SYSMEM mappings created by bhyveload(8).
	 */
	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
	if (error == 0 && gpa == memmap.gpa) {
		if (segid != memmap.segid || off != memmap.segoff ||
		    prot != memmap.prot || flags != memmap.flags) {
			errno = EEXIST;
			return (-1);
		} else {
			return (0);
		}
	}

	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
	return (error);
}

#ifdef	__FreeBSD__
int
vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
    size_t *lowmem_size, size_t *highmem_size)
{

	*guest_baseaddr = ctx->baseaddr;
	*lowmem_size = ctx->lowmem;
	*highmem_size = ctx->highmem;
	return (0);
}
#endif

int
vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
{
	struct vm_munmap munmap;
	int error;

	munmap.gpa = gpa;
	munmap.len = len;

	error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
	return (error);
}

int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_memmap memmap;
	int error;

	bzero(&memmap, sizeof(struct vm_memmap));
	memmap.gpa = *gpa;
	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
	if (error == 0) {
		*gpa = memmap.gpa;
		*segid = memmap.segid;
		*segoff = memmap.segoff;
		*len = memmap.len;
		*prot = memmap.prot;
		*flags = memmap.flags;
	}
	return (error);
}

/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

	if (len == len2) {
		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
			return (0);
	}
	return (-1);
}

static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}

int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	memseg.segid = segid;
	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
	if (error == 0) {
		*lenp = memseg.len;
		n = strlcpy(namebuf, memseg.name, bufsize);
		if (n >= bufsize) {
			errno = ENAMETOOLONG;
			error = -1;
		}
	}
	return (error);
}

static int
#ifdef __FreeBSD__
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
#else
setup_memory_segment(struct vmctx *ctx, int segid, vm_paddr_t gpa, size_t len,
    char *base)
#endif
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
#ifdef __FreeBSD__
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
#else
	/*
	 * As we use two segments for lowmem/highmem the offset within the
	 * segment is 0 on illumos.
	 */
	error = vm_mmap_memseg(ctx, gpa, segid, 0, len, PROT_ALL);
#endif
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

#ifdef __FreeBSD__
	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);
#endif

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;

#ifdef __FreeBSD__
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}
#else
	if (ctx->highmem > 0) {
		error = vm_alloc_memseg(ctx, VM_HIGHMEM, ctx->highmem, NULL);
		if (error)
			return (error);
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, VM_HIGHMEM, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		error = vm_alloc_memseg(ctx, VM_LOWMEM, ctx->lowmem, NULL);
		if (error)
			return (error);
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, VM_LOWMEM, gpa, len, baseaddr);
		if (error)
			return (error);
	}
#endif

	ctx->baseaddr = baseaddr;

	return (0);
}
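
/*
 * The resulting host address-space layout (derived from the code above;
 * baseaddr points just past the leading guard region, so host address ==
 * baseaddr + gpa for any gpa inside lowmem or highmem):
 *
 *	[ guard | lowmem: gpa 0 .. lowmem | hole up to 4GB |
 *	  highmem: gpa 4GB .. 4GB + highmem | guard ]
 *
 * The guards and the hole below 4GB stay PROT_NONE, so stray host
 * accesses there fault rather than silently touching guest memory.
 */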

/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region.
 * The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB) {
			if (gaddr < 4*GB + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= 4*GB + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	return (NULL);
}

#ifdef	__FreeBSD__
vm_paddr_t
vm_rev_map_gpa(struct vmctx *ctx, void *addr)
{
	vm_paddr_t offaddr;

	offaddr = (char *)addr - ctx->baseaddr;

	if (ctx->lowmem > 0)
		if (offaddr <= ctx->lowmem)
			return (offaddr);

	if (ctx->highmem > 0)
		if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
			return (offaddr);

	return ((vm_paddr_t)-1);
}

const char *
vm_get_name(struct vmctx *ctx)
{

	return (ctx->name);
}
#endif /* __FreeBSD__ */

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}

#ifndef __FreeBSD__
int
vm_get_devmem_offset(struct vmctx *ctx, int segid, off_t *mapoff)
{
	struct vm_devmem_offset vdo;
	int error;

	vdo.segid = segid;
	error = ioctl(ctx->fd, VM_DEVMEM_GETOFFSET, &vdo);
	if (error == 0)
		*mapoff = vdo.offset;

	return (error);
}
#endif

void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
#ifdef	__FreeBSD__
	char pathname[MAXPATHLEN];
#endif
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;
	off_t mapoff;

	fd = -1;
	ptr = MAP_FAILED;
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

#ifdef	__FreeBSD__
	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;
#else
	if (vm_get_devmem_offset(ctx, segid, &mapoff) != 0)
		goto done;
#endif

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
	    0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

#ifdef	__FreeBSD__
	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
#else
	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, ctx->fd,
	    mapoff);
#endif
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}

#ifdef	__FreeBSD__
static int
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
{
	/*
	 * XXX: fragile, handle with care
	 * Assumes that the first field of the ioctl data
	 * is the vcpuid.
	 */
	*(int *)arg = vcpu->vcpuid;
	return (ioctl(vcpu->ctx->fd, cmd, arg));
}
#else
/*
 * Rather than use the fragile function above, we continue to explicitly set
 * the vcpu field in the command struct, and use the following function to
 * wrap the invocations, to continue to minimise the upstream diff.
 */
static int
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
{
	return (ioctl(vcpu->ctx->fd, cmd, arg));
}
#endif

int
vm_set_desc(struct vcpu *vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu->vcpuid;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit,
    uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu->vcpuid;
	vmsegdesc.regnum = reg;

	error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc)
{
	int error;

	error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit,
	    &seg_desc->access);
	return (error);
}

int
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu->vcpuid;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu->vcpuid;
	vmreg.regnum = reg;

	error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_set_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.cpuid = vcpu->vcpuid;
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_get_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.cpuid = vcpu->vcpuid;
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
	return (error);
}

#ifdef	__FreeBSD__
int
vm_run(struct vcpu *vcpu, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));

	error = vcpu_ioctl(vcpu, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}
#else
int
vm_run(struct vcpu *vcpu, const struct vm_entry *vm_entry,
    struct vm_exit *vm_exit)
{
	struct vm_entry entry;

	bcopy(vm_entry, &entry, sizeof (entry));
	entry.cpuid = vcpu->vcpuid;
	entry.exit_data = vm_exit;

	return (vcpu_ioctl(vcpu, VM_RUN, &entry));
}
#endif
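
/*
 * Example (illustrative sketch of the illumos variant): a caller drives a
 * vcpu by invoking vm_run() in a loop, re-entering until a terminal exit
 * is seen.  Exit handling here is schematic, not exhaustive.
 *
 *	struct vm_entry entry = { 0 };
 *	struct vm_exit vmexit = { 0 };
 *
 *	for (;;) {
 *		if (vm_run(vcpu, &entry, &vmexit) != 0)
 *			break;
 *		if (vmexit.exitcode == VM_EXITCODE_SUSPENDED)
 *			break;
 *		... handle other exit codes and prepare 'entry' (e.g. to
 *		fulfill pending MMIO or in/out emulation) before re-entry ...
 *	}
 */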

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
#ifndef __FreeBSD__
	/*
	 * The existing userspace does not (currently) inject targeted
	 * triple-fault suspend states, so it does not need to specify source.
	 */
	vmsuspend.source = -1;
#endif /* __FreeBSD__ */
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

#ifdef __FreeBSD__
int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}
#else
int
vm_reinit(struct vmctx *ctx, uint64_t flags)
{
	struct vm_reinit reinit = {
		.flags = flags
	};

	return (ioctl(ctx->fd, VM_REINIT, &reinit));
}
#endif

int
vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	exc.cpuid = vcpu->vcpuid;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc));
}

#ifndef __FreeBSD__
void
vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
    int errcode)
{
	int error;
	struct vm_exception exc;

	exc.cpuid = vcpu->vcpuid;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = 1;
	error = vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc);

	assert(error == 0);
}
#endif /* __FreeBSD__ */

int
vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vcpu *vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu->vcpuid;
	vmirq.vector = vector;

	return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vcpu *vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu->vcpuid;
	vmirq.vector = vector;

	return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa,
    bool write, int size, uint64_t *value)
{
	struct vm_readwrite_kernemu_device irp = {
		.vcpuid = vcpu->vcpuid,
		.access_width = fls(size) - 1,
		.gpa = gpa,
		.value = write ? *value : ~0ul,
	};
	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
	int rc;

	rc = vcpu_ioctl(vcpu, cmd, &irp);
	if (rc == 0 && !write)
		*value = irp.value;
	return (rc);
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vcpu *vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu->vcpuid;

	return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi));
}

static const char *capstrmap[] = {
	[VM_CAP_HALT_EXIT]  = "hlt_exit",
	[VM_CAP_MTRAP_EXIT] = "mtrap_exit",
	[VM_CAP_PAUSE_EXIT] = "pause_exit",
#ifdef __FreeBSD__
	[VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
#endif
	[VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
	[VM_CAP_BPT_EXIT] = "bpt_exit",
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; i < (int)nitems(capstrmap); i++) {
		if (strcmp(capstrmap[i], capname) == 0)
			return (i);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	if (type >= 0 && type < (int)nitems(capstrmap))
		return (capstrmap[type]);

	return (NULL);
}
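
/*
 * capstrmap thus gives a bidirectional mapping between capability enums
 * and their string names: vm_capability_name2type("hlt_exit") returns
 * VM_CAP_HALT_EXIT, and vm_capability_type2name(VM_CAP_HALT_EXIT)
 * returns "hlt_exit".
 */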

int
vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu->vcpuid;
	vmcap.captype = cap;

	error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu->vcpuid;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
}

#ifdef __FreeBSD__
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		     vm_paddr_t gpa, size_t len)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;

	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
}

int
vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev ppt;

	bzero(&ppt, sizeof(ppt));
	ppt.bus = bus;
	ppt.slot = slot;
	ppt.func = func;

	return ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt);
}

#else /* __FreeBSD__ */

int
vm_assign_pptdev(struct vmctx *ctx, int pptfd)
{
	struct vm_pptdev pptdev;

	pptdev.pptfd = pptfd;
	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int pptfd)
{
	struct vm_pptdev pptdev;

	pptdev.pptfd = pptfd;
	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa, size_t len,
    vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	pptmmio.pptfd = pptfd;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;
	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa, size_t len)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.pptfd = pptfd;
	pptmmio.gpa = gpa;
	pptmmio.len = len;

	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int pptfd, uint64_t addr,
    uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	pptmsi.pptfd = pptfd;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;
	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int pptfd, int idx,
    uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	pptmsix.pptfd = pptfd;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;
	return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
}

int
vm_get_pptdev_limits(struct vmctx *ctx, int pptfd, int *msi_limit,
    int *msix_limit)
{
	struct vm_pptdev_limits pptlimits;
	int error;

	bzero(&pptlimits, sizeof (pptlimits));
	pptlimits.pptfd = pptfd;
	error = ioctl(ctx->fd, VM_GET_PPTDEV_LIMITS, &pptlimits);

	*msi_limit = pptlimits.msi_limit;
	*msix_limit = pptlimits.msix_limit;
	return (error);
}

int
vm_disable_pptdev_msix(struct vmctx *ctx, int pptfd)
{
	struct vm_pptdev pptdev;

	pptdev.pptfd = pptfd;
	return (ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &pptdev));
}
#endif /* __FreeBSD__ */

uint64_t *
vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	static _Thread_local uint64_t *stats_buf;
	static _Thread_local uint32_t stats_count;
	uint64_t *new_stats;
	struct vm_stats vmstats;
	uint32_t count, index;
	bool have_stats;

	have_stats = false;
	vmstats.cpuid = vcpu->vcpuid;
	count = 0;
	for (index = 0;; index += nitems(vmstats.statbuf)) {
		vmstats.index = index;
		if (vcpu_ioctl(vcpu, VM_STATS_IOC, &vmstats) != 0)
			break;
		if (stats_count < index + vmstats.num_entries) {
			new_stats = reallocarray(stats_buf,
			    index + vmstats.num_entries, sizeof(uint64_t));
			if (new_stats == NULL) {
				errno = ENOMEM;
				return (NULL);
			}
			stats_count = index + vmstats.num_entries;
			stats_buf = new_stats;
		}
		memcpy(stats_buf + index, vmstats.statbuf,
		    vmstats.num_entries * sizeof(uint64_t));
		count += vmstats.num_entries;
		have_stats = true;

		if (vmstats.num_entries != nitems(vmstats.statbuf))
			break;
	}
	if (have_stats) {
		if (ret_entries)
			*ret_entries = count;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (stats_buf);
	} else {
		return (NULL);
	}
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
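
/*
 * Example (illustrative): dumping every stat for a vcpu by pairing
 * vm_get_stats() with vm_get_stat_desc(), assuming each returned index
 * has a description.
 *
 *	int i, nstats;
 *	uint64_t *stats = vm_get_stats(vcpu, NULL, &nstats);
 *
 *	if (stats != NULL) {
 *		for (i = 0; i < nstats; i++)
 *			printf("%s\t%" PRIu64 "\n",
 *			    vm_get_stat_desc(ctx, i), stats[i]);
 *	}
 */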

int
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu->vcpuid;

	error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu->vcpuid;
	x2apic.state = state;

	error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

#ifndef __FreeBSD__
int
vcpu_reset(struct vcpu *vcpu)
{
	struct vm_vcpu_reset vvr;

	vvr.vcpuid = vcpu->vcpuid;
	vvr.kind = VRK_RESET;

	return (vcpu_ioctl(vcpu, VM_RESET_CPU, &vvr));
}
#else /* __FreeBSD__ */
/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vcpu *vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	/*
	 * According to Intel's Software Developer Manual, CR0 should be
	 * initialized with CR0_ET | CR0_NW | CR0_CD, but that crashes some
	 * guests like Windows.
	 */
	cr0 = CR0_NE;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR2, zero)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, zero)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R8, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R9, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R10, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R11, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R12, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R13, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R14, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R15, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR6,
	    0xffff0ff0)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR7,
	    0x400)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW,
	    zero)) != 0)
		goto done;

	error = 0;
done:
	return (error);
}
#endif /* __FreeBSD__ */

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

int
vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu->vcpuid;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

int
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu->vcpuid;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa, off;
	int error, i, n;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		off = gpa & PAGE_MASK;
		n = MIN(len, PAGE_SIZE - off);

		va = vm_map_gpa(vcpu->ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}
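
/*
 * Example (illustrative): reading guest memory that may straddle a page
 * boundary by pairing vm_copy_setup() with vm_copyin(); 'paging' and
 * 'gla' would come from the current exit context.
 *
 *	struct iovec iov[2];
 *	uint64_t data;
 *	int error, fault;
 *
 *	error = vm_copy_setup(vcpu, &paging, gla, sizeof (data),
 *	    PROT_READ, iov, nitems(iov), &fault);
 *	if (error == 0 && !fault)
 *		vm_copyin(iov, &data, sizeof (data));
 */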
1847 
1848 void
vm_copy_teardown(struct iovec * iov __unused,int iovcnt __unused)1849 vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused)
1850 {
1851 	/*
1852 	 * Intentionally empty.  This is used by the instruction
1853 	 * emulation code shared with the kernel.  The in-kernel
1854 	 * version of this is non-empty.
1855 	 */
1856 }
1857 
1858 void
vm_copyin(struct iovec * iov,void * vp,size_t len)1859 vm_copyin(struct iovec *iov, void *vp, size_t len)
1860 {
1861 	const char *src;
1862 	char *dst;
1863 	size_t n;
1864 
1865 	dst = vp;
1866 	while (len) {
1867 		assert(iov->iov_len);
1868 		n = min(len, iov->iov_len);
1869 		src = iov->iov_base;
1870 		bcopy(src, dst, n);
1871 
1872 		iov++;
1873 		dst += n;
1874 		len -= n;
1875 	}
1876 }
1877 
1878 void
vm_copyout(const void * vp,struct iovec * iov,size_t len)1879 vm_copyout(const void *vp, struct iovec *iov, size_t len)
1880 {
1881 	const char *src;
1882 	char *dst;
1883 	size_t n;
1884 
1885 	src = vp;
1886 	while (len) {
1887 		assert(iov->iov_len);
1888 		n = min(len, iov->iov_len);
1889 		dst = iov->iov_base;
1890 		bcopy(src, dst, n);
1891 
1892 		iov++;
1893 		src += n;
1894 		len -= n;
1895 	}
1896 }
1897 
1898 static int
vm_get_cpus(struct vmctx * ctx,int which,cpuset_t * cpus)1899 vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
1900 {
1901 	struct vm_cpuset vm_cpuset;
1902 	int error;
1903 
1904 	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
1905 	vm_cpuset.which = which;
1906 	vm_cpuset.cpusetsize = sizeof(cpuset_t);
1907 	vm_cpuset.cpus = cpus;
1908 
1909 	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
1910 	return (error);
1911 }
1912 
1913 int
vm_active_cpus(struct vmctx * ctx,cpuset_t * cpus)1914 vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
1915 {
1916 
1917 	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
1918 }
1919 
1920 #ifdef __FreeBSD__
1921 int
vm_suspended_cpus(struct vmctx * ctx,cpuset_t * cpus)1922 vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
1923 {
1924 
1925 	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
1926 }
1927 #endif /* __FreeBSD__ */
1928 
1929 int
vm_debug_cpus(struct vmctx * ctx,cpuset_t * cpus)1930 vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
1931 {
1932 
1933 	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
1934 }
1935 
1936 int
vm_activate_cpu(struct vcpu * vcpu)1937 vm_activate_cpu(struct vcpu *vcpu)
1938 {
1939 	struct vm_activate_cpu ac;
1940 	int error;
1941 
1942 	bzero(&ac, sizeof(struct vm_activate_cpu));
1943 	ac.vcpuid = vcpu->vcpuid;
1944 	error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
1945 	return (error);
1946 }

int
vm_suspend_all_cpus(struct vmctx *ctx)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = -1;
	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_suspend_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu->vcpuid;
	error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_resume_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu->vcpuid;
	error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
	return (error);
}

int
vm_resume_all_cpus(struct vmctx *ctx)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = -1;
	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
	return (error);
}
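
/*
 * Example (sketch): quiescing every vCPU (a vcpuid of -1 means "all")
 * around an inspection of guest state, then resuming.
 *
 *	if (vm_suspend_all_cpus(ctx) != 0)
 *		err(EXIT_FAILURE, "VM_SUSPEND_CPU");
 *	... examine or mutate guest state ...
 *	if (vm_resume_all_cpus(ctx) != 0)
 *		err(EXIT_FAILURE, "VM_RESUME_CPU");
 */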

int
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu->vcpuid;
	error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu->vcpuid;
	vmii.info1 = info1;
	error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
	return (error);
}
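
/*
 * Example (sketch): carrying pending event-injection state from one
 * query to a later replay, as a save/restore path might.  The two values
 * describe up to two pending exception/interrupt events; handling of the
 * second is elided here.
 *
 *	uint64_t info1, info2;
 *
 *	if (vm_get_intinfo(vcpu, &info1, &info2) == 0)
 *		(void) vm_set_intinfo(vcpu, info1);
 */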

int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	rtcdata.value = value;
	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
	return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
	if (error == 0)
		*retval = rtcdata.value;
	return (error);
}
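
/*
 * Example (sketch): reading the MC146818 seconds register, which lives
 * at RTC offset 0.
 *
 *	uint8_t secs;
 *
 *	if (vm_rtc_read(ctx, 0, &secs) != 0)
 *		err(EXIT_FAILURE, "VM_RTC_READ");
 */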

#ifdef __FreeBSD__
int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	rtctime.secs = secs;
	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
	return (error);
}

int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
	if (error == 0)
		*secs = rtctime.secs;
	return (error);
}
#else /* __FreeBSD__ */

int
vm_rtc_settime(struct vmctx *ctx, const timespec_t *ts)
{
	return (ioctl(ctx->fd, VM_RTC_SETTIME, ts));
}

int
vm_rtc_gettime(struct vmctx *ctx, timespec_t *ts)
{
	return (ioctl(ctx->fd, VM_RTC_GETTIME, ts));
}

#endif /* __FreeBSD__ */
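
/*
 * Example (sketch): seeding the guest RTC from the host clock.  Note the
 * interface difference: the FreeBSD build takes whole seconds as a
 * time_t, while the illumos build takes a timespec_t.
 *
 *	#ifdef __FreeBSD__
 *		(void) vm_rtc_settime(ctx, time(NULL));
 *	#else
 *		timespec_t ts;
 *
 *		(void) clock_gettime(CLOCK_REALTIME, &ts);
 *		(void) vm_rtc_settime(ctx, &ts);
 *	#endif
 */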

int
vm_restart_instruction(void *ctxp, int vcpu __unused)
{
	struct vmctx *ctx = ctxp;
	int arg;

	/* 'arg' is only a dummy ioctl payload; it is left uninitialized. */
	return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &arg));
}

int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
{
	struct vm_cpu_topology topology;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	topology.sockets = sockets;
	topology.cores = cores;
	topology.threads = threads;
	topology.maxcpus = maxcpus;
	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
}

int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
{
	struct vm_cpu_topology topology;
	int error;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
	if (error == 0) {
		*sockets = topology.sockets;
		*cores = topology.cores;
		*threads = topology.threads;
		*maxcpus = topology.maxcpus;
	}
	return (error);
}
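
/*
 * Example (sketch): configuring a 2-socket, 2-core, 1-thread guest and
 * reading the result back.  A maxcpus of 0 is assumed here to request
 * the default limit.
 *
 *	uint16_t s, c, t, m;
 *
 *	if (vm_set_topology(ctx, 2, 2, 1, 0) != 0)
 *		err(EXIT_FAILURE, "VM_SET_TOPOLOGY");
 *	if (vm_get_topology(ctx, &s, &c, &t, &m) == 0)
 *		printf("%ux%ux%u (max %u)\n", s, c, t, m);
 */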

#ifdef	__FreeBSD__
/* Keep in sync with machine/vmm_dev.h. */
static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
    VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG,
    VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
    VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
    VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
    VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
    VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
    VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
    VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
    VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
    VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
    VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
    VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
    VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
    VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
    VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
    VM_GLA2GPA_NOFAULT,
    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
    VM_SET_INTINFO, VM_GET_INTINFO,
    VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
    VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY,
    VM_SNAPSHOT_REQ, VM_RESTORE_TIME
};

int
vm_limit_rights(struct vmctx *ctx)
{
	cap_rights_t rights;
	size_t ncmds;

	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
	if (caph_rights_limit(ctx->fd, &rights) != 0)
		return (-1);
	ncmds = nitems(vm_ioctl_cmds);
	if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, ncmds) != 0)
		return (-1);
	return (0);
}
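
/*
 * Example (sketch): limiting rights on the VM device fd before entering
 * capability mode via the capsicum_helpers(3) wrappers.
 *
 *	if (vm_limit_rights(ctx) != 0)
 *		err(EXIT_FAILURE, "vm_limit_rights");
 *	if (caph_enter() != 0)
 *		err(EXIT_FAILURE, "caph_enter");
 */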
#endif

/*
 * Avoid using in new code.  Operations on the fd should be wrapped here so that
 * capability rights can be kept in sync.
 */
int
vm_get_device_fd(struct vmctx *ctx)
{

	return (ctx->fd);
}

#ifndef __FreeBSD__
int
vm_pmtmr_set_location(struct vmctx *ctx, uint16_t ioport)
{
	return (ioctl(ctx->fd, VM_PMTMR_LOCATE, ioport));
}

int
vm_wrlock_cycle(struct vmctx *ctx)
{
	if (ioctl(ctx->fd, VM_WRLOCK_CYCLE, 0) != 0) {
		return (errno);
	}
	return (0);
}

int
vm_get_run_state(struct vcpu *vcpu, enum vcpu_run_state *state,
    uint8_t *sipi_vector)
{
	struct vm_run_state data;

	data.vcpuid = vcpu->vcpuid;
	if (vcpu_ioctl(vcpu, VM_GET_RUN_STATE, &data) != 0) {
		return (errno);
	}

	*state = data.state;
	*sipi_vector = data.sipi_vector;
	return (0);
}

int
vm_set_run_state(struct vcpu *vcpu, enum vcpu_run_state state,
    uint8_t sipi_vector)
{
	struct vm_run_state data;

	data.vcpuid = vcpu->vcpuid;
	data.state = state;
	data.sipi_vector = sipi_vector;
	if (vcpu_ioctl(vcpu, VM_SET_RUN_STATE, &data) != 0) {
		return (errno);
	}

	return (0);
}
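
/*
 * Example (sketch): a userspace SIPI, moving a vCPU waiting in the INIT
 * state to running, assuming the VRS_* states from machine/vmm.h.  The
 * vector value 0x10 is hypothetical.
 *
 *	enum vcpu_run_state state;
 *	uint8_t vec;
 *
 *	if (vm_get_run_state(vcpu, &state, &vec) == 0 && state == VRS_INIT)
 *		(void) vm_set_run_state(vcpu, VRS_RUN, 0x10);
 */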

int
vm_vcpu_barrier(struct vcpu *vcpu)
{
	if (ioctl(vcpu->ctx->fd, VM_VCPU_BARRIER, vcpu->vcpuid) != 0) {
		return (errno);
	}

	return (0);
}
#endif /* __FreeBSD__ */

#ifdef __FreeBSD__
const cap_ioctl_t *
vm_get_ioctls(size_t *len)
{
	cap_ioctl_t *cmds;

	if (len == NULL) {
		cmds = malloc(sizeof(vm_ioctl_cmds));
		if (cmds == NULL)
			return (NULL);
		bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
		return (cmds);
	}

	*len = nitems(vm_ioctl_cmds);
	return (NULL);
}
#endif /* __FreeBSD__ */
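
/*
 * Example (sketch): vm_get_ioctls() has two modes.  With a non-NULL len
 * it only stores the table size; with a NULL len it returns a
 * malloc(3)ed copy of the table that the caller must free.
 *
 *	size_t ncmds;
 *	const cap_ioctl_t *cmds;
 *
 *	(void) vm_get_ioctls(&ncmds);
 *	if ((cmds = vm_get_ioctls(NULL)) != NULL) {
 *		... use ncmds entries of cmds ...
 *		free(__DECONST(cap_ioctl_t *, cmds));
 *	}
 */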