/*
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/subr_unit.c 255057 2013-08-30 07:37:45Z kib $
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/archsystm.h>
#include <sys/cpuset.h>
#include <sys/fp.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/spl.h>
#include <sys/systm.h>
#include <sys/ddidmareq.h>
#include <sys/id_space.h>
#include <sys/psm_defs.h>
#include <sys/smp_impldefs.h>
#include <sys/modhash.h>
#include <sys/hma.h>

#include <sys/x86_archext.h>

#include <machine/cpufunc.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <machine/vmparam.h>
#include <sys/vmm_impl.h>
#include <sys/kernel.h>

#include <vm/as.h>
#include <vm/seg_kmem.h>

SET_DECLARE(sysinit_set, struct sysinit);

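/*
 * Run the FreeBSD-style SYSINIT handlers gathered into the sysinit linker
 * set, in the order they appear in the set.
 */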
void
sysinit(void)
{
	struct sysinit **si;

	SET_FOREACH(si, sysinit_set)
		(*si)->func((*si)->data);
}

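/* Lookup table for converting the binary values 0-99 to BCD. */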
uint8_t const bin2bcd_data[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99
};

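/*
 * Flush caches on all CPUs: cross-call every other CPU to run
 * invalidate_cache(), then do the same locally.  Preemption is disabled so
 * that the set of "other" CPUs remains accurate for the duration.
 */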
void
invalidate_cache_all(void)
{
	cpuset_t cpuset;

	kpreempt_disable();
	cpuset_all_but(&cpuset, CPU->cpu_id);
	xc_call((xc_arg_t)NULL, (xc_arg_t)NULL, (xc_arg_t)NULL,
	    CPUSET2BV(cpuset), (xc_func_t)invalidate_cache);
	invalidate_cache();
	kpreempt_enable();
}

vm_paddr_t
vtophys(void *va)
{
	pfn_t	pfn;

	/*
	 * Since hat_getpfnum() may block on an htable mutex, this is not at
	 * all safe to run from a critical_enter/kpreempt_disable context.
	 * The FreeBSD analog does not have the same locking constraints, so
	 * close attention must be paid wherever this is called.
	 */
	ASSERT(curthread->t_preempt == 0);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)va);
	ASSERT(pfn != PFN_INVALID);
	return ((pfn << PAGE_SHIFT) | ((uintptr_t)va & PAGE_MASK));
}

int
cpusetobj_ffs(const cpuset_t *set)
{
	uint_t large, small;

	/*
	 * Rather than reaching into the cpuset_t ourselves, leave that task to
	 * cpuset_bounds().  The simplicity is worth the extra wasted work to
	 * find the upper bound.
	 */
	cpuset_bounds(set, &small, &large);

	if (small == CPUSET_NOTINSET) {
		/* The FreeBSD version returns 0 if it finds nothing */
		return (0);
	}

	ASSERT3U(small, <=, INT_MAX);

	/* Least significant bit index starts at 1 for valid results */
	return (small + 1);
}

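/*
 * FreeBSD's free(9) does not take the allocation size, while kmem_free()
 * requires it.  Each allocation handed out by malloc() below is therefore
 * tracked in a hash keyed by its DMAP address so that the size can be looked
 * up (and the memory released) when free() is called.
 */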
struct kmem_item {
	void			*addr;
	size_t			size;
};
static kmutex_t kmem_items_lock;

static mod_hash_t *vmm_alloc_hash;
uint_t vmm_alloc_hash_nchains = 16381;
uint_t vmm_alloc_hash_size = PAGESIZE;

static void
vmm_alloc_hash_valdtor(mod_hash_val_t val)
{
	struct kmem_item *i = (struct kmem_item *)val;

	kmem_free(i->addr, i->size);
	kmem_free(i, sizeof (struct kmem_item));
}

static void
vmm_alloc_init(void)
{
	vmm_alloc_hash = mod_hash_create_ptrhash("vmm_alloc_hash",
	    vmm_alloc_hash_nchains, vmm_alloc_hash_valdtor,
	    vmm_alloc_hash_size);

	VERIFY(vmm_alloc_hash != NULL);
}

static uint_t
vmm_alloc_check(mod_hash_key_t key, mod_hash_val_t *val, void *unused)
{
	struct kmem_item *i = (struct kmem_item *)val;

	cmn_err(CE_PANIC, "!vmm_alloc_check: hash not empty: %p, %lu", i->addr,
	    i->size);

	return (MH_WALK_TERMINATE);
}

static void
vmm_alloc_cleanup(void)
{
	mod_hash_walk(vmm_alloc_hash, vmm_alloc_check, NULL);
	mod_hash_destroy_ptrhash(vmm_alloc_hash);
}

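/* Emulate FreeBSD malloc(9)/free(9) on top of the kmem allocator. */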
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	void			*p;
	struct kmem_item	*i;
	int			kmem_flag = KM_SLEEP;

	if (flags & M_NOWAIT)
		kmem_flag = KM_NOSLEEP;

	if (flags & M_ZERO) {
		p = kmem_zalloc(size, kmem_flag);
	} else {
		p = kmem_alloc(size, kmem_flag);
	}

	if (p == NULL)
		return (NULL);

	i = kmem_zalloc(sizeof (struct kmem_item), kmem_flag);

	if (i == NULL) {
		kmem_free(p, size);
		return (NULL);
	}

	mutex_enter(&kmem_items_lock);
	i->addr = p;
	i->size = size;

	VERIFY(mod_hash_insert(vmm_alloc_hash,
	    (mod_hash_key_t)PHYS_TO_DMAP(vtophys(p)), (mod_hash_val_t)i) == 0);

	mutex_exit(&kmem_items_lock);

	return (p);
}

void
free(void *addr, struct malloc_type *mtp)
{
	mutex_enter(&kmem_items_lock);
	VERIFY(mod_hash_destroy(vmm_alloc_hash,
	    (mod_hash_key_t)PHYS_TO_DMAP(vtophys(addr))) == 0);
	mutex_exit(&kmem_items_lock);
}

extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
extern void contig_free(void *, size_t);

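/*
 * Emulate FreeBSD contigmalloc(9) using contig_alloc(), describing the
 * caller's address range and alignment requirements with a DMA attribute
 * structure.
 */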
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	ddi_dma_attr_t attr = {
		/* Using fastboot_dma_attr as a guide... */
		DMA_ATTR_V0,
		low,			/* dma_attr_addr_lo */
		high,			/* dma_attr_addr_hi */
		0x00000000FFFFFFFFULL,	/* dma_attr_count_max */
		alignment,		/* dma_attr_align */
		1,			/* dma_attr_burstsize */
		1,			/* dma_attr_minxfer */
		0x00000000FFFFFFFFULL,	/* dma_attr_maxxfer */
		0x00000000FFFFFFFFULL,	/* dma_attr_seg: any */
		1,			/* dma_attr_sgllen */
		alignment,		/* dma_attr_granular */
		0,			/* dma_attr_flags */
	};
	int cansleep = (flags & M_WAITOK);
	void *result;

	ASSERT(alignment == PAGESIZE);

	result = contig_alloc((size_t)size, &attr, alignment, cansleep);

	if (result != NULL && (flags & M_ZERO) != 0) {
		bzero(result, size);
	}
	return (result);
}

void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
	contig_free(addr, size);
}

void
mtx_init(struct mtx *mtx, char *name, const char *type_name, int opts)
{
	/*
	 * Requests that a mutex be initialized to the MTX_SPIN type are
	 * ignored.  The limitations which may have required spinlocks on
	 * FreeBSD do not apply to how bhyve has been structured here.
	 *
	 * Adaptive mutexes are required to avoid deadlocks when certain
	 * cyclics behavior interacts with interrupts and contended locks.
	 */
	mutex_init(&mtx->m, name, MUTEX_ADAPTIVE, NULL);
}

void
mtx_destroy(struct mtx *mtx)
{
	mutex_destroy(&mtx->m);
}

void
critical_enter(void)
{
	kpreempt_disable();
}

void
critical_exit(void)
{
	kpreempt_enable();
}

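/*
 * The FreeBSD callout(9) interfaces used by bhyve are emulated with cyclics:
 * each struct callout owns a cyclic which is reprogrammed to the requested
 * expiration time, or to CY_INFINITY when the callout is idle or stopped.
 */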
static void
vmm_glue_callout_handler(void *arg)
{
	struct callout *c = arg;

	if (callout_active(c)) {
		/*
		 * Record the handler fire time so that callout_pending() is
		 * able to detect if the callout becomes rescheduled during the
		 * course of the handler.
		 */
		c->c_fired = gethrtime();
		(c->c_func)(c->c_arg);
	}
}

void
vmm_glue_callout_init(struct callout *c, int mpsafe)
{
	cyc_handler_t	hdlr;
	cyc_time_t	when;

	hdlr.cyh_level = CY_LOW_LEVEL;
	hdlr.cyh_func = vmm_glue_callout_handler;
	hdlr.cyh_arg = c;
	when.cyt_when = CY_INFINITY;
	when.cyt_interval = CY_INFINITY;
	bzero(c, sizeof (*c));

	mutex_enter(&cpu_lock);
	c->c_cyc_id = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
}

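/*
 * Convert an sbintime_t, a 32.32 fixed-point count of seconds, into an
 * hrtime_t count of nanoseconds.
 */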
static __inline hrtime_t
sbttohrtime(sbintime_t sbt)
{
	return (((sbt >> 32) * NANOSEC) +
	    (((uint64_t)NANOSEC * (uint32_t)sbt) >> 32));
}

int
vmm_glue_callout_reset_sbt(struct callout *c, sbintime_t sbt, sbintime_t pr,
    void (*func)(void *), void *arg, int flags)
{
	hrtime_t target = sbttohrtime(sbt);

	ASSERT(c->c_cyc_id != CYCLIC_NONE);

	if ((flags & C_ABSOLUTE) == 0) {
		target += gethrtime();
	}

	c->c_func = func;
	c->c_arg = arg;
	c->c_target = target;
	cyclic_reprogram(c->c_cyc_id, target);

	return (0);
}

int
vmm_glue_callout_stop(struct callout *c)
{
	ASSERT(c->c_cyc_id != CYCLIC_NONE);

	c->c_target = 0;
	cyclic_reprogram(c->c_cyc_id, CY_INFINITY);

	return (0);
}

int
vmm_glue_callout_drain(struct callout *c)
{
	ASSERT(c->c_cyc_id != CYCLIC_NONE);

	c->c_target = 0;
	mutex_enter(&cpu_lock);
	cyclic_remove(c->c_cyc_id);
	c->c_cyc_id = CYCLIC_NONE;
	mutex_exit(&cpu_lock);

	return (0);
}

void
vmm_glue_callout_localize(struct callout *c)
{
	mutex_enter(&cpu_lock);
	cyclic_move_here(c->c_cyc_id);
	mutex_exit(&cpu_lock);
}

uint_t	cpu_high;		/* Highest arg to CPUID */
uint_t	cpu_exthigh;		/* Highest arg to extended CPUID */
uint_t	cpu_id;			/* Stepping ID */
char	cpu_vendor[20];		/* CPU Origin code */

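/*
 * Gather basic CPUID information at initialization time, caching it in the
 * globals above (which mirror the FreeBSD variables of the same names).
 */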
static void
vmm_cpuid_init(void)
{
	uint_t regs[4];

	do_cpuid(0, regs);
	cpu_high = regs[0];
	((uint_t *)&cpu_vendor)[0] = regs[1];
	((uint_t *)&cpu_vendor)[1] = regs[3];
	((uint_t *)&cpu_vendor)[2] = regs[2];
	cpu_vendor[12] = '\0';

	do_cpuid(1, regs);
	cpu_id = regs[0];

	do_cpuid(0x80000000, regs);
	cpu_exthigh = regs[0];
}

/*
 * FreeBSD uses the struct savefpu for managing the FPU state. That is mimicked
 * by our hypervisor multiplexor framework structure.
 */
struct savefpu *
fpu_save_area_alloc(void)
{
	return ((struct savefpu *)hma_fpu_alloc(KM_SLEEP));
}

void
fpu_save_area_free(struct savefpu *fsa)
{
	hma_fpu_t *fpu = (hma_fpu_t *)fsa;
	hma_fpu_free(fpu);
}

void
fpu_save_area_reset(struct savefpu *fsa)
{
	hma_fpu_t *fpu = (hma_fpu_t *)fsa;
	hma_fpu_init(fpu);
}

/*
 * This glue function is supposed to save the host's FPU state. This is always
 * paired in the general bhyve code with a call to fpurestore. Therefore, we
 * treat this as a nop and do all the work in fpurestore(), which will have the
 * context argument that we want anyways.
 */
void
fpuexit(kthread_t *td)
{
}

/*
 * This glue function is supposed to restore the guest's FPU state from the
 * save area back to the host. In FreeBSD, it is assumed that the host state
 * has already been saved by a call to fpuexit(); however, we do both here.
 */
void
fpurestore(void *arg)
{
	hma_fpu_t *fpu = arg;

	hma_fpu_start_guest(fpu);
}

/*
 * This glue function is supposed to save the guest's FPU state. The host's FPU
 * state is not expected to be restored necessarily due to the use of FPU
 * emulation through CR0.TS. However, we can and do restore it here.
 */
void
fpusave(void *arg)
{
	hma_fpu_t *fpu = arg;

	hma_fpu_stop_guest(fpu);
}

void
vmm_sol_glue_init(void)
{
	vmm_alloc_init();
	vmm_cpuid_init();
}

void
vmm_sol_glue_cleanup(void)
{
	vmm_alloc_cleanup();
}

/* From FreeBSD's sys/kern/subr_clock.c */

/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: clock.c 1.18 91/01/21$
 *	from: @(#)clock.c	8.2 (Berkeley) 1/12/94
 *	from: NetBSD: clock_subr.c,v 1.6 2001/07/07 17:04:02 thorpej Exp
 *	and
 *	from: src/sys/i386/isa/clock.c,v 1.176 2001/09/04
 */

#include <sys/clock.h>

/*
 * Generic routines to convert between a POSIX date
 * (seconds since 1/1/1970) and yr/mo/day/hr/min/sec
 * Derived from NetBSD arch/hp300/hp300/clock.c
 */

#define	FEBRUARY	2
#define	days_in_year(y)		(leapyear(y) ? 366 : 365)
#define	days_in_month(y, m) \
	(month_days[(m) - 1] + (m == FEBRUARY ? leapyear(y) : 0))
/* Day of week. Days are counted from 1/1/1970, which was a Thursday */
#define	day_of_week(days)	(((days) + 4) % 7)

static const int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This inline avoids some unnecessary modulo operations
 * as compared with the usual macro:
 *   ( ((year % 4) == 0 &&
 *      (year % 100) != 0) ||
 *     ((year % 400) == 0) )
 * It is otherwise equivalent.
 */
static int
leapyear(int year)
{
	int rv = 0;

	if ((year & 3) == 0) {
		rv = 1;
		if ((year % 100) == 0) {
			rv = 0;
			if ((year % 400) == 0)
				rv = 1;
		}
	}
	return (rv);
}

int
clock_ct_to_ts(struct clocktime *ct, struct timespec *ts)
{
	int i, year, days;

	year = ct->year;

#ifdef __FreeBSD__
	if (ct_debug) {
		printf("ct_to_ts(");
		print_ct(ct);
		printf(")");
	}
#endif

	/* Sanity checks. */
	if (ct->mon < 1 || ct->mon > 12 || ct->day < 1 ||
	    ct->day > days_in_month(year, ct->mon) ||
	    ct->hour > 23 || ct->min > 59 || ct->sec > 59 ||
	    (sizeof (time_t) == 4 && year > 2037)) {	/* time_t overflow */
#ifdef __FreeBSD__
		if (ct_debug)
			printf(" = EINVAL\n");
#endif
		return (EINVAL);
	}

	/*
	 * Compute days since start of time
	 * First from years, then from months.
	 */
	days = 0;
	for (i = POSIX_BASE_YEAR; i < year; i++)
		days += days_in_year(i);

	/* Months */
	for (i = 1; i < ct->mon; i++)
		days += days_in_month(year, i);
	days += (ct->day - 1);

	ts->tv_sec = (((time_t)days * 24 + ct->hour) * 60 + ct->min) * 60 +
	    ct->sec;
	ts->tv_nsec = ct->nsec;

#ifdef __FreeBSD__
	if (ct_debug)
		printf(" = %ld.%09ld\n", (long)ts->tv_sec, (long)ts->tv_nsec);
#endif
	return (0);
}

void
clock_ts_to_ct(struct timespec *ts, struct clocktime *ct)
{
	int i, year, days;
	time_t rsec;	/* remainder seconds */
	time_t secs;

	secs = ts->tv_sec;
	days = secs / SECDAY;
	rsec = secs % SECDAY;

	ct->dow = day_of_week(days);

	/* Subtract out whole years, counting them in year. */
	for (year = POSIX_BASE_YEAR; days >= days_in_year(year); year++)
		days -= days_in_year(year);
	ct->year = year;

	/* Subtract out whole months, counting them in i. */
	for (i = 1; days >= days_in_month(year, i); i++)
		days -= days_in_month(year, i);
	ct->mon = i;

	/* Days are what is left over (+1) from all that. */
	ct->day = days + 1;

	/* Hours, minutes, seconds are easy */
	ct->hour = rsec / 3600;
	rsec = rsec % 3600;
	ct->min  = rsec / 60;
	rsec = rsec % 60;
	ct->sec  = rsec;
	ct->nsec = ts->tv_nsec;
#ifdef __FreeBSD__
	if (ct_debug) {
		printf("ts_to_ct(%ld.%09ld) = ",
		    (long)ts->tv_sec, (long)ts->tv_nsec);
		print_ct(ct);
		printf("\n");
	}
#endif
}

/* Equivalent to the FreeBSD rdtsc(), but with any necessary per-cpu offset */
uint64_t
rdtsc_offset(void)
{
	/*
	 * The timestamp logic will decide if a delta need be applied to the
	 * unscaled hrtime reading (effectively rdtsc), but we do require it be
	 * backed by the TSC itself.
	 */
	extern hrtime_t (*gethrtimeunscaledf)(void);
	extern hrtime_t tsc_gethrtimeunscaled(void);
	extern hrtime_t tsc_gethrtimeunscaled_delta(void);

	ASSERT(*gethrtimeunscaledf == tsc_gethrtimeunscaled ||
	    *gethrtimeunscaledf == tsc_gethrtimeunscaled_delta);
	return ((uint64_t)gethrtimeunscaledf());
}