/*
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/subr_sleepqueue.c 261520 2014-02-05 18:13:27Z jhb $
 */
/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/subr_unit.c 255057 2013-08-30 07:37:45Z kib $
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 */

#include <sys/types.h>
#include <sys/archsystm.h>
#include <sys/cpuset.h>
#include <sys/errno.h>
#include <sys/fp.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/spl.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <sys/vmm_impl.h>

#include <vm/as.h>
#include <vm/seg_kmem.h>

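/*
 * Translate a kernel virtual address to a physical address by looking up
 * the page frame number through the kernel HAT and adding the page offset.
 */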
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	pfn_t	pfn;

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)va);
	ASSERT(pfn != PFN_INVALID);
	return ((pfn << PAGE_SHIFT) | ((uintptr_t)va & PAGE_MASK));
}

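/*
 * Return the 1-based index of the lowest CPU bit set in 'set', or 0 if the
 * set is empty (an ffs() over the whole cpuset).
 */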
int
cpusetobj_ffs(const cpuset_t *set)
{
#if	CPUSET_WORDS > 1
	int	i, cbit;

	cbit = 0;
	for (i = 0; i < CPUSET_WORDS; i++) {
		if (set->cpub[i] != 0) {
			cbit = ffsl(set->cpub[i]);
			/* Account for the bits in the preceding words. */
			cbit += i * sizeof (set->cpub[0]) * NBBY;
			break;
		}
	}
	return (cbit);
#else
	return (ffsl(*set));
#endif
}

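/*
 * Minimal emulation of FreeBSD's smp_rendezvous(): only an action function
 * is supported (no setup or teardown), and it is run synchronously on all
 * CPUs via a cross call.
 */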
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{
	cpuset_t cpuset;

	ASSERT(setup_func == NULL);
	ASSERT(teardown_func == NULL);

	CPUSET_ALL(cpuset);
	xc_sync((xc_arg_t)arg, 0, 0, CPUSET2BV(cpuset),
	    (xc_func_t)action_func);
}

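/*
 * FreeBSD-style malloc()/free() shim.  kmem_free() needs the allocation
 * size, so each allocation carries a small kmem_item trailer recording its
 * address and size; free() looks the item up to recover the size.
 */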
struct kmem_item {
	void			*addr;
	size_t			size;
	LIST_ENTRY(kmem_item)	next;
};
static kmutex_t kmem_items_lock;
static LIST_HEAD(, kmem_item) kmem_items;

void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	void			*p;
	struct kmem_item	*i;
	int			kmem_flag = KM_SLEEP;

	if (flags & M_NOWAIT)
		kmem_flag = KM_NOSLEEP;

	if (flags & M_ZERO) {
		p = kmem_zalloc(size + sizeof (struct kmem_item), kmem_flag);
	} else {
		p = kmem_alloc(size + sizeof (struct kmem_item), kmem_flag);
	}

	/* KM_NOSLEEP allocations may fail; don't touch the trailer then. */
	if (p == NULL)
		return (NULL);

	mutex_enter(&kmem_items_lock);
	i = (struct kmem_item *)((caddr_t)p + size);
	i->addr = p;
	i->size = size;

	LIST_INSERT_HEAD(&kmem_items, i, next);
	mutex_exit(&kmem_items_lock);

	return (p);
}

void
free(void *addr, struct malloc_type *mtp)
{
	struct kmem_item	*i;

	mutex_enter(&kmem_items_lock);
	LIST_FOREACH(i, &kmem_items, next) {
		if (i->addr == addr)
			break;
	}
	ASSERT(i != NULL);
	LIST_REMOVE(i, next);
	mutex_exit(&kmem_items_lock);

	kmem_free(addr, i->size + sizeof (struct kmem_item));
}

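/*
 * Map FreeBSD mtx(9) onto an illumos kmutex: MTX_SPIN becomes a spin mutex
 * at DISP_LEVEL, everything else an adaptive MUTEX_DRIVER lock.
 */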
void
mtx_init(struct mtx *mtx, char *name, const char *type_name, int opts)
{
	if (opts & MTX_SPIN) {
		mutex_init(&mtx->m, name, MUTEX_SPIN,
		    (ddi_iblock_cookie_t)ipltospl(DISP_LEVEL));
	} else {
		mutex_init(&mtx->m, name, MUTEX_DRIVER, NULL);
	}
}

void
mtx_destroy(struct mtx *mtx)
{
	mutex_destroy(&mtx->m);
}

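/*
 * critical_enter()/critical_exit() disable preemption and pin the current
 * thread to its CPU for the duration of the critical section.
 */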
void
critical_enter(void)
{
	kpreempt_disable();
	thread_affinity_set(curthread, CPU_CURRENT);
}

void
critical_exit(void)
{
	thread_affinity_clear(curthread);
	kpreempt_enable();
}

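/*
 * Simple unit number allocator modeled on FreeBSD's subr_unit.c: allocated
 * numbers are tracked in a small hash of 'struct unr' entries so alloc_unr()
 * can find a free value between uh->min and uh->max.
 */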
struct unr {
	u_int		item;
	struct unr	*link;
};

#define	UNR_HASHSIZE	8

struct unrhdr {
	struct mtx	*mtx;
	struct unr	*hash[UNR_HASHSIZE];
	u_int		min;
	u_int		max;
	u_int		next;
};

#define	HASH_UNR(uh, i)	((uh)->hash[(i) & ((UNR_HASHSIZE) - 1)])

static struct mtx unr_mtx;

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */
struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mtx)
{
	struct unrhdr	*uh;

	uh = kmem_zalloc(sizeof (struct unrhdr), KM_SLEEP);
	if (mtx) {
		uh->mtx = mtx;
	} else {
		uh->mtx = &unr_mtx;
	}
	uh->min = low;
	uh->max = high;
	uh->next = uh->min;

	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{
	kmem_free(uh, sizeof (struct unrhdr));
}

static struct unr *
unr_lookup(struct unrhdr *uh, int item)
{
	struct unr	*unr;

	ASSERT(MUTEX_HELD(&uh->mtx->m));

	for (unr = HASH_UNR(uh, item); unr != NULL; unr = unr->link) {
		if (unr->item == item)
			break;
	}

	return (unr);
}

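/*
 * Allocate the next free unit number, scanning forward from uh->next and
 * wrapping at uh->max.  Returns -1 if every number in the range is in use.
 */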
int
alloc_unr(struct unrhdr *uh)
{
	struct unr	*unr;
	int		item, start;

	mutex_enter(&uh->mtx->m);
	start = uh->next;
	for (;;) {
		item = uh->next;
		if (++uh->next == uh->max) {
			uh->next = uh->min;
		}

		if (unr_lookup(uh, item) == NULL) {
			unr = kmem_zalloc(sizeof (struct unr), KM_SLEEP);
			unr->item = item;
			unr->link = HASH_UNR(uh, item);
			HASH_UNR(uh, item) = unr;
			break;
		}

		/* Give up once we have wrapped all the way around. */
		if (uh->next == start) {
			item = -1;
			break;
		}
	}
	mutex_exit(&uh->mtx->m);

	return (item);
}

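/*
 * Release a previously allocated unit number, unlinking it from its hash
 * chain.  The number must currently be allocated.
 */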
void
free_unr(struct unrhdr *uh, u_int item)
{
	struct unr	*unr, **unrp;

	mutex_enter(&uh->mtx->m);
	unrp = &HASH_UNR(uh, item);
	for (;;) {
		ASSERT(*unrp != NULL);
		if ((*unrp)->item == item)
			break;
		unrp = &(*unrp)->link;
	}
	unr = *unrp;
	*unrp = unr->link;
	mutex_exit(&uh->mtx->m);
	kmem_free(unr, sizeof (struct unr));
}

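/*
 * FreeBSD callout(9) emulation on top of cyclics.  Each callout owns a
 * cyclic that is normally programmed to CY_INFINITY (i.e. never fires);
 * callout_reset_sbt() reprograms it to the requested expiration time.
 */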
static void
vmm_glue_callout_handler(void *arg)
{
	struct callout *c = arg;

	c->c_flags &= ~CALLOUT_PENDING;
	if (c->c_flags & CALLOUT_ACTIVE) {
		(c->c_func)(c->c_arg);
	}
}

void
vmm_glue_callout_init(struct callout *c, int mpsafe)
{
	cyc_handler_t	hdlr;
	cyc_time_t	when;

	hdlr.cyh_level = CY_LOW_LEVEL;
	hdlr.cyh_func = vmm_glue_callout_handler;
	hdlr.cyh_arg = c;
	when.cyt_when = CY_INFINITY;
	when.cyt_interval = CY_INFINITY;

	mutex_enter(&cpu_lock);
	c->c_cyc_id = cyclic_add(&hdlr, &when);
	c->c_flags |= CALLOUT_ACTIVE;
	mutex_exit(&cpu_lock);
}

int
vmm_glue_callout_reset_sbt(struct callout *c, sbintime_t sbt, sbintime_t pr,
    void (*func)(void *), void *arg, int flags)
{
	ASSERT(c->c_cyc_id != CYCLIC_NONE);

	c->c_func = func;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (flags & C_ABSOLUTE)
		cyclic_reprogram(c->c_cyc_id, sbt);
	else
		cyclic_reprogram(c->c_cyc_id, sbt + gethrtime());

	return (0);
}

int
vmm_glue_callout_stop(struct callout *c)
{
	ASSERT(c->c_cyc_id != CYCLIC_NONE);
	cyclic_reprogram(c->c_cyc_id, CY_INFINITY);
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	return (0);
}

int
vmm_glue_callout_drain(struct callout *c)
{
	ASSERT(c->c_cyc_id != CYCLIC_NONE);
	mutex_enter(&cpu_lock);
	cyclic_remove(c->c_cyc_id);
	c->c_cyc_id = CYCLIC_NONE;
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	mutex_exit(&cpu_lock);

	return (0);
}

static int
ipi_cpu_justreturn(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	return (0);
}

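/*
 * Deliver an IPI to a single CPU by issuing a no-op cross call; the
 * cross-call interrupt itself is what the caller is after.
 */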
void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t	set;

	CPUSET_ONLY(set, cpu);
	xc_call_nowait(NULL, NULL, NULL, CPUSET2BV(set),
	    ipi_cpu_justreturn);
}

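/*
 * Simplified port of FreeBSD's sleepqueue(9): wait channels hash to a
 * sleepqueue_chain, and each active channel has a sleepqueue whose condition
 * variable carries the actual blocking and wakeup.
 */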
#define	SC_TABLESIZE	256			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]

struct sleepqueue {
	u_int sq_blockedcnt;			/* Num. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* Chain. */
	void		*sq_wchan;		/* Wait channel. */
	kcondvar_t	sq_cv;
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx	sc_lock;		/* Spin lock for this chain. */
};

static struct sleepqueue_chain	sleepq_chains[SC_TABLESIZE];

#define	SLEEPQ_CACHE_SZ		(64)
static kmem_cache_t		*vmm_sleepq_cache;

static int
vmm_sleepq_cache_init(void *buf, void *user_arg, int kmflags)
{
	struct sleepqueue *sq = (struct sleepqueue *)buf;

	bzero(sq, sizeof (struct sleepqueue));
	cv_init(&sq->sq_cv, NULL, CV_DRIVER, NULL);

	return (0);
}

static void
vmm_sleepq_cache_fini(void *buf, void *user_arg)
{
	struct sleepqueue *sq = (struct sleepqueue *)buf;

	cv_destroy(&sq->sq_cv);
}

static void
init_sleepqueues(void)
{
	int	i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}

	vmm_sleepq_cache = kmem_cache_create("vmm_sleepq_cache",
	    sizeof (struct sleepqueue), SLEEPQ_CACHE_SZ, vmm_sleepq_cache_init,
	    vmm_sleepq_cache_fini, NULL, NULL, NULL, 0);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
static void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
static struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain	*sc;
	struct sleepqueue	*sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
static void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

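/*
 * Find (or create) the sleep queue for 'wchan' and account for one more
 * blocked thread.  The chain lock must already be held.
 */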
struct sleepqueue *
sleepq_add(void *wchan)
{
	struct sleepqueue_chain	*sc;
	struct sleepqueue	*sq;

	sc = SC_LOOKUP(wchan);

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	if (sq == NULL) {
		sq = kmem_cache_alloc(vmm_sleepq_cache, KM_SLEEP);
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
	}

	sq->sq_blockedcnt++;

	return (sq);
}

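/*
 * Drop one blocked-thread reference; free the sleep queue once it empties.
 */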
void
sleepq_remove(struct sleepqueue *sq)
{
	sq->sq_blockedcnt--;

	if (sq->sq_blockedcnt == 0) {
		LIST_REMOVE(sq, sq_hash);
		kmem_cache_free(vmm_sleepq_cache, sq);
	}
}

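/*
 * Minimal msleep_spin(): register on the wait channel's sleep queue, block
 * on its condition variable for at most 'ticks' ticks while 'mtx' is
 * dropped, then dequeue.  Returns 0 if awakened, EWOULDBLOCK on timeout.
 */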
int
msleep_spin(void *chan, struct mtx *mtx, const char *wmesg, int ticks)
{
	struct sleepqueue	*sq;
	int			error;

	sleepq_lock(chan);
	sq = sleepq_add(chan);
	sleepq_release(chan);

	/* cv_reltimedwait() returns -1 on timeout; map that to EWOULDBLOCK. */
	if (cv_reltimedwait(&sq->sq_cv, &mtx->m, ticks, TR_CLOCK_TICK) == -1)
		error = EWOULDBLOCK;
	else
		error = 0;

	sleepq_lock(chan);
	sleepq_remove(sq);
	sleepq_release(chan);

	return (error);
}

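/*
 * wakeup() and wakeup_one() signal the condition variable of the channel's
 * sleep queue (broadcast vs. single waiter), if anyone is sleeping there.
 */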
void
wakeup(void *chan)
{
	struct sleepqueue	*sq;

	sleepq_lock(chan);
	sq = sleepq_lookup(chan);
	if (sq != NULL) {
		cv_broadcast(&sq->sq_cv);
	}
	sleepq_release(chan);
}

void
wakeup_one(void *chan)
{
	struct sleepqueue	*sq;

	sleepq_lock(chan);
	sq = sleepq_lookup(chan);
	if (sq != NULL) {
		cv_signal(&sq->sq_cv);
	}
	sleepq_release(chan);
}

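/*
 * Cached CPUID information, mirroring the cpu_* globals that FreeBSD-derived
 * vmm code expects to find.
 */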
u_int	cpu_high;		/* Highest arg to CPUID */
u_int	cpu_exthigh;		/* Highest arg to extended CPUID */
u_int	cpu_id;			/* Stepping ID */
char	cpu_vendor[20];		/* CPU Origin code */

static void
vmm_cpuid_init(void)
{
	u_int regs[4];

	do_cpuid(0, regs);
	cpu_high = regs[0];
	((u_int *)&cpu_vendor)[0] = regs[1];
	((u_int *)&cpu_vendor)[1] = regs[3];
	((u_int *)&cpu_vendor)[2] = regs[2];
	cpu_vendor[12] = '\0';

	do_cpuid(1, regs);
	cpu_id = regs[0];

	do_cpuid(0x80000000, regs);
	cpu_exthigh = regs[0];
}

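/*
 * Guest FPU state handling.  Save areas are allocated from a dedicated vmem
 * arena whose quantum is XSAVE_AREA_ALIGN, keeping them suitably aligned for
 * the xsave/xrstor instructions.
 */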
struct savefpu {
	fpu_ctx_t	fsa_fp_ctx;
};

static vmem_t *fpu_save_area_arena;

static void
fpu_save_area_init(void)
{
	fpu_save_area_arena = vmem_create("fpu_save_area",
	    NULL, 0, XSAVE_AREA_ALIGN,
	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_BESTFIT | VM_SLEEP);
}

static void
fpu_save_area_cleanup(void)
{
	vmem_destroy(fpu_save_area_arena);
}

struct savefpu *
fpu_save_area_alloc(void)
{
	return (vmem_alloc(fpu_save_area_arena, sizeof (struct savefpu),
	    VM_SLEEP));
}

void
fpu_save_area_free(struct savefpu *fsa)
{
	vmem_free(fpu_save_area_arena, fsa, sizeof (struct savefpu));
}

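/*
 * Reset a save area to the initial FPU state (the SSE or AVX initial image,
 * depending on the host's save mechanism).
 */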
void
fpu_save_area_reset(struct savefpu *fsa)
{
	extern const struct fxsave_state sse_initial;
	extern const struct xsave_state avx_initial;
	struct fpu_ctx *fp;
	struct fxsave_state *fx;
	struct xsave_state *xs;

	fp = &fsa->fsa_fp_ctx;

	fp->fpu_regs.kfpu_status = 0;
	fp->fpu_regs.kfpu_xstatus = 0;

	switch (fp_save_mech) {
	case FP_FXSAVE:
		fx = &fp->fpu_regs.kfpu_u.kfpu_fx;
		bcopy(&sse_initial, fx, sizeof (*fx));
		break;
	case FP_XSAVE:
		fp->fpu_xsave_mask = (XFEATURE_ENABLED_X87 |
		    XFEATURE_ENABLED_SSE | XFEATURE_ENABLED_AVX);
		xs = &fp->fpu_regs.kfpu_u.kfpu_xs;
		bcopy(&avx_initial, xs, sizeof (*xs));
		break;
	default:
		panic("Invalid fp_save_mech");
		/*NOTREACHED*/
	}
}

void
fpuexit(kthread_t *td)
{
	fp_save(&curthread->t_lwp->lwp_pcb.pcb_fpu);
}

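/*
 * Thin wrappers around the fxsave/fxrstor and xsave/xrstor instructions used
 * to save and restore guest FPU state directly.
 */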
static __inline void
vmm_fxrstor(struct fxsave_state *addr)
{
	__asm __volatile("fxrstor %0" : : "m" (*(addr)));
}

static __inline void
vmm_fxsave(struct fxsave_state *addr)
{
	__asm __volatile("fxsave %0" : "=m" (*(addr)));
}

static __inline void
vmm_xrstor(struct xsave_state *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
}

static __inline void
vmm_xsave(struct xsave_state *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}

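/*
 * Load guest FPU state from the save area using whichever mechanism the host
 * selected at boot (fxrstor or xrstor).
 */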
void
fpurestore(void *arg)
{
	struct savefpu *fsa = (struct savefpu *)arg;
	struct fpu_ctx *fp;

	fp = &fsa->fsa_fp_ctx;

	switch (fp_save_mech) {
	case FP_FXSAVE:
		vmm_fxrstor(&fp->fpu_regs.kfpu_u.kfpu_fx);
		break;
	case FP_XSAVE:
		vmm_xrstor(&fp->fpu_regs.kfpu_u.kfpu_xs, fp->fpu_xsave_mask);
		break;
	default:
		panic("Invalid fp_save_mech");
		/*NOTREACHED*/
	}
}

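/*
 * Save the current FPU state into the guest save area, mirroring
 * fpurestore() above.
 */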
void
fpusave(void *arg)
{
	struct savefpu *fsa = (struct savefpu *)arg;
	struct fpu_ctx *fp;

	fp = &fsa->fsa_fp_ctx;

	switch (fp_save_mech) {
	case FP_FXSAVE:
		vmm_fxsave(&fp->fpu_regs.kfpu_u.kfpu_fx);
		break;
	case FP_XSAVE:
		vmm_xsave(&fp->fpu_regs.kfpu_u.kfpu_xs, fp->fpu_xsave_mask);
		break;
	default:
		panic("Invalid fp_save_mech");
		/*NOTREACHED*/
	}
}

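/*
 * One-time setup and teardown for this glue layer: the CPUID cache, the FPU
 * save-area arena, and the sleep queues.
 */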
void
vmm_sol_glue_init(void)
{
	vmm_cpuid_init();
	fpu_save_area_init();
	init_sleepqueues();
}

void
vmm_sol_glue_cleanup(void)
{
	fpu_save_area_cleanup();
	kmem_cache_destroy(vmm_sleepq_cache);
}