1 /*
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: head/sys/kern/subr_unit.c 255057 2013-08-30 07:37:45Z kib $
27  */
28 /*
29  * This file and its contents are supplied under the terms of the
30  * Common Development and Distribution License ("CDDL"), version 1.0.
31  * You may only use this file in accordance with the terms of version
32  * 1.0 of the CDDL.
33  *
34  * A full copy of the text of the CDDL should have accompanied this
35  * source.  A copy of the CDDL is also available via the Internet at
36  * http://www.illumos.org/license/CDDL.
37  *
38  * Copyright 2014 Pluribus Networks Inc.
39  * Copyright 2019 Joyent, Inc.
40  * Copyright 2020 Oxide Computer Company
41  */
42 
43 #include <sys/types.h>
44 #include <sys/archsystm.h>
45 #include <sys/cpuset.h>
46 #include <sys/fp.h>
47 #include <sys/malloc.h>
48 #include <sys/queue.h>
49 #include <sys/spl.h>
50 #include <sys/systm.h>
51 #include <sys/ddidmareq.h>
52 #include <sys/id_space.h>
53 #include <sys/psm_defs.h>
54 #include <sys/smp_impldefs.h>
55 #include <sys/modhash.h>
56 #include <sys/hma.h>
57 
58 #include <sys/x86_archext.h>
59 
60 #include <machine/cpufunc.h>
61 #include <machine/fpu.h>
62 #include <machine/md_var.h>
63 #include <machine/pmap.h>
64 #include <machine/specialreg.h>
65 #include <machine/vmm.h>
66 #include <sys/vmm_impl.h>
67 #include <sys/kernel.h>
68 
69 #include <vm/as.h>
70 #include <vm/seg_kmem.h>
71 
SET_DECLARE(sysinit_set, struct sysinit);

/*
 * Run every handler gathered into the sysinit linker set, mimicking
 * FreeBSD's SYSINIT(9) startup mechanism for the ported bhyve code.
 */
void
sysinit(void)
{
	struct sysinit **si;

	SET_FOREACH(si, sysinit_set)
		(*si)->func((*si)->data);
}
82 
/* Lookup table mapping binary values 0-99 to their BCD encodings. */
uint8_t const bin2bcd_data[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99
};
95 
/*
 * Invalidate caches on all CPUs: cross-call invalidate_cache() on every
 * other CPU, then invoke it locally.  Preemption is disabled so that the
 * set of "other" CPUs stays accurate for the duration of the call.
 */
void
pmap_invalidate_cache(void)
{
	cpuset_t cpuset;

	kpreempt_disable();
	/* Target every CPU except the one we are currently running on. */
	cpuset_all_but(&cpuset, CPU->cpu_id);
	xc_call((xc_arg_t)NULL, (xc_arg_t)NULL, (xc_arg_t)NULL,
	    CPUSET2BV(cpuset), (xc_func_t)invalidate_cache);
	invalidate_cache();
	kpreempt_enable();
}
108 
/*
 * Translate a kernel virtual address to a physical address, preserving
 * the intra-page offset.  The address must map to a valid page (asserted
 * via PFN_INVALID).
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	pfn_t	pfn;

	/*
	 * Since hat_getpfnum() may block on an htable mutex, this is not at
	 * all safe to run from a critical_enter/kpreempt_disable context.
	 * The FreeBSD analog does not have the same locking constraints, so
	 * close attention must be paid wherever this is called.
	 */
	ASSERT(curthread->t_preempt == 0);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)va);
	ASSERT(pfn != PFN_INVALID);
	return (pfn << PAGE_SHIFT) | ((uintptr_t)va & PAGE_MASK);
}
126 
127 int
128 cpusetobj_ffs(const cpuset_t *set)
129 {
130 	uint_t large, small;
131 
132 	/*
133 	 * Rather than reaching into the cpuset_t ourselves, leave that task to
134 	 * cpuset_bounds().  The simplicity is worth the extra wasted work to
135 	 * find the upper bound.
136 	 */
137 	cpuset_bounds(set, &small, &large);
138 
139 	if (small == CPUSET_NOTINSET) {
140 		/* The FreeBSD version returns 0 if it find nothing */
141 		return (0);
142 	}
143 
144 	ASSERT3U(small, <=, INT_MAX);
145 
146 	/* Least significant bit index starts at 1 for valid results */
147 	return (small + 1);
148 }
149 
/* Bookkeeping record for a single malloc() allocation. */
struct kmem_item {
	void			*addr;	/* buffer returned by kmem_*alloc */
	size_t			size;	/* size passed to kmem_*alloc */
};
/* Serializes insert/remove operations on vmm_alloc_hash. */
static kmutex_t kmem_items_lock;

/* Hash tracking live malloc() allocations, keyed by DMAP address. */
static mod_hash_t *vmm_alloc_hash;
uint_t vmm_alloc_hash_nchains = 16381;	/* prime chain count */
uint_t vmm_alloc_hash_size = PAGESIZE;
159 
160 static void
161 vmm_alloc_hash_valdtor(mod_hash_val_t val)
162 {
163 	struct kmem_item *i = (struct kmem_item *)val;
164 
165 	kmem_free(i->addr, i->size);
166 	kmem_free(i, sizeof (struct kmem_item));
167 }
168 
169 static void
170 vmm_alloc_init(void)
171 {
172 	vmm_alloc_hash = mod_hash_create_ptrhash("vmm_alloc_hash",
173 	    vmm_alloc_hash_nchains, vmm_alloc_hash_valdtor,
174 	    vmm_alloc_hash_size);
175 
176 	VERIFY(vmm_alloc_hash != NULL);
177 }
178 
179 static uint_t
180 vmm_alloc_check(mod_hash_key_t key, mod_hash_val_t *val, void *unused)
181 {
182 	struct kmem_item *i = (struct kmem_item *)val;
183 
184 	cmn_err(CE_PANIC, "!vmm_alloc_check: hash not empty: %p, %lu", i->addr,
185 	    i->size);
186 
187 	return (MH_WALK_TERMINATE);
188 }
189 
/*
 * Tear down the allocation-tracking hash.  Any entry remaining triggers a
 * panic in vmm_alloc_check() before the hash is destroyed.
 */
static void
vmm_alloc_cleanup(void)
{
	mod_hash_walk(vmm_alloc_hash, vmm_alloc_check, NULL);
	mod_hash_destroy_ptrhash(vmm_alloc_hash);
}
196 
197 void *
198 malloc(unsigned long size, struct malloc_type *mtp, int flags)
199 {
200 	void			*p;
201 	struct kmem_item	*i;
202 	int			kmem_flag = KM_SLEEP;
203 
204 	if (flags & M_NOWAIT)
205 		kmem_flag = KM_NOSLEEP;
206 
207 	if (flags & M_ZERO) {
208 		p = kmem_zalloc(size, kmem_flag);
209 	} else {
210 		p = kmem_alloc(size, kmem_flag);
211 	}
212 
213 	if (p == NULL)
214 		return (NULL);
215 
216 	i = kmem_zalloc(sizeof (struct kmem_item), kmem_flag);
217 
218 	if (i == NULL) {
219 		kmem_free(p, size);
220 		return (NULL);
221 	}
222 
223 	mutex_enter(&kmem_items_lock);
224 	i->addr = p;
225 	i->size = size;
226 
227 	VERIFY(mod_hash_insert(vmm_alloc_hash,
228 	    (mod_hash_key_t)PHYS_TO_DMAP(vtophys(p)), (mod_hash_val_t)i) == 0);
229 
230 	mutex_exit(&kmem_items_lock);
231 
232 	return (p);
233 }
234 
/*
 * FreeBSD-compatible free(9) shim.  Destroying the hash entry invokes
 * vmm_alloc_hash_valdtor(), which performs the actual kmem_free() of both
 * the buffer and its tracking record.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	mutex_enter(&kmem_items_lock);
	VERIFY(mod_hash_destroy(vmm_alloc_hash,
	    (mod_hash_key_t)PHYS_TO_DMAP(vtophys(addr))) == 0);
	mutex_exit(&kmem_items_lock);
}
243 
244 extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
245 extern void contig_free(void *, size_t);
246 
/*
 * FreeBSD-compatible contigmalloc(9) shim built on the illumos
 * contig_alloc() allocator, with the placement constraints expressed
 * through a ddi_dma_attr_t.
 *
 * NOTE(review): the `boundary` argument is not reflected in the DMA
 * attributes -- confirm callers only pass 0.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	ddi_dma_attr_t attr = {
		/* Using fastboot_dma_attr as a guide... */
		DMA_ATTR_V0,
		low,			/* dma_attr_addr_lo */
		high,			/* dma_attr_addr_hi */
		0x00000000FFFFFFFFULL,	/* dma_attr_count_max */
		alignment,		/* dma_attr_align */
		1,			/* dma_attr_burstsize */
		1,			/* dma_attr_minxfer */
		0x00000000FFFFFFFFULL,	/* dma_attr_maxxfer */
		0x00000000FFFFFFFFULL,	/* dma_attr_seg: any */
		1,			/* dma_attr_sgllen */
		alignment,		/* dma_attr_granular */
		0,			/* dma_attr_flags */
	};
	/* Nonzero when the caller can tolerate blocking for memory. */
	int cansleep = (flags & M_WAITOK);
	void *result;

	/* Only page-aligned requests are expected from the bhyve code. */
	ASSERT(alignment == PAGESIZE);

	result = contig_alloc((size_t)size, &attr, alignment, cansleep);

	/* Honor M_ZERO ourselves. */
	if (result != NULL && (flags & M_ZERO) != 0) {
		bzero(result, size);
	}
	return (result);
}
279 
/* Release a buffer obtained from contigmalloc(). */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
	contig_free(addr, size);
}
285 
/*
 * Initialize a FreeBSD-style mtx on top of an illumos adaptive mutex.
 * The `type_name` and `opts` arguments are accepted for source
 * compatibility but otherwise ignored.
 */
void
mtx_init(struct mtx *mtx, char *name, const char *type_name, int opts)
{
	/*
	 * Requests that a mutex be initialized to the MTX_SPIN type are
	 * ignored.  The limitations which may have required spinlocks on
	 * FreeBSD do not apply to how bhyve has been structured here.
	 *
	 * Adaptive mutexes are required to avoid deadlocks when certain
	 * cyclics behavior interacts with interrupts and contended locks.
	 */
	mutex_init(&mtx->m, name, MUTEX_ADAPTIVE, NULL);
}

/* Destroy a mutex created by mtx_init(). */
void
mtx_destroy(struct mtx *mtx)
{
	mutex_destroy(&mtx->m);
}
305 
/* FreeBSD critical_enter(): mapped to disabling kernel preemption. */
void
critical_enter(void)
{
	kpreempt_disable();
}

/* FreeBSD critical_exit(): re-enable kernel preemption. */
void
critical_exit(void)
{
	kpreempt_enable();
}
317 
318 
/*
 * Cyclic handler which dispatches an armed callout, invoking the function
 * and argument stashed in the callout by vmm_glue_callout_reset_sbt().
 */
static void
vmm_glue_callout_handler(void *arg)
{
	struct callout *c = arg;

	if (callout_active(c)) {
		/*
		 * Record the handler fire time so that callout_pending() is
		 * able to detect if the callout becomes rescheduled during the
		 * course of the handler.
		 */
		c->c_fired = gethrtime();
		(c->c_func)(c->c_arg);
	}
}
334 
/*
 * Initialize a callout by backing it with a cyclic.  The cyclic is
 * created with a fire time of CY_INFINITY (effectively disarmed); it is
 * armed later via cyclic_reprogram() in vmm_glue_callout_reset_sbt().
 * The `mpsafe` argument is accepted for FreeBSD compatibility only.
 */
void
vmm_glue_callout_init(struct callout *c, int mpsafe)
{
	cyc_handler_t	hdlr;
	cyc_time_t	when;

	hdlr.cyh_level = CY_LOW_LEVEL;
	hdlr.cyh_func = vmm_glue_callout_handler;
	hdlr.cyh_arg = c;
	when.cyt_when = CY_INFINITY;
	when.cyt_interval = CY_INFINITY;
	/* Clear the callout before publishing the cyclic id into it. */
	bzero(c, sizeof (*c));

	mutex_enter(&cpu_lock);
	c->c_cyc_id = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
}
352 
353 static __inline hrtime_t
354 sbttohrtime(sbintime_t sbt)
355 {
356 	return (((sbt >> 32) * NANOSEC) +
357 	    (((uint64_t)NANOSEC * (uint32_t)sbt) >> 32));
358 }
359 
/*
 * Arm a callout to invoke func(arg) at the time described by `sbt`.  With
 * C_ABSOLUTE in `flags`, `sbt` is an absolute deadline; otherwise it is
 * relative to the current time.  The periodic-interval argument `pr` is
 * unused here.  Always returns 0.
 */
int
vmm_glue_callout_reset_sbt(struct callout *c, sbintime_t sbt, sbintime_t pr,
    void (*func)(void *), void *arg, int flags)
{
	hrtime_t target = sbttohrtime(sbt);

	ASSERT(c->c_cyc_id != CYCLIC_NONE);

	if ((flags & C_ABSOLUTE) == 0) {
		target += gethrtime();
	}

	/* Publish handler state before programming the cyclic to fire. */
	c->c_func = func;
	c->c_arg = arg;
	c->c_target = target;
	cyclic_reprogram(c->c_cyc_id, target);

	return (0);
}
379 
/*
 * Disarm a callout by pushing its cyclic fire time out to CY_INFINITY.
 * The backing cyclic remains allocated.  Always returns 0.
 */
int
vmm_glue_callout_stop(struct callout *c)
{
	ASSERT(c->c_cyc_id != CYCLIC_NONE);

	c->c_target = 0;
	cyclic_reprogram(c->c_cyc_id, CY_INFINITY);

	return (0);
}

/*
 * Tear down a callout's backing cyclic entirely, after which the handler
 * can no longer fire.  Always returns 0.
 */
int
vmm_glue_callout_drain(struct callout *c)
{
	ASSERT(c->c_cyc_id != CYCLIC_NONE);

	c->c_target = 0;
	mutex_enter(&cpu_lock);
	cyclic_remove(c->c_cyc_id);
	c->c_cyc_id = CYCLIC_NONE;
	mutex_exit(&cpu_lock);

	return (0);
}
404 
/* Move a callout's backing cyclic onto the current CPU. */
void
vmm_glue_callout_localize(struct callout *c)
{
	mutex_enter(&cpu_lock);
	cyclic_move_here(c->c_cyc_id);
	mutex_exit(&cpu_lock);
}
412 
/*
 * Interrupt (poke) the given CPU.  The `ipi` vector argument is unused;
 * a plain poke_cpu() provides the required notification.
 */
void
ipi_cpu(int cpu, uint_t ipi)
{
	/*
	 * This was previously implemented as an invocation of asynchronous
	 * no-op crosscalls to interrupt the target CPU.  Since even nowait
	 * crosscalls can block in certain circumstances, a direct poke_cpu()
	 * is safer when called from delicate contexts.
	 */
	poke_cpu(cpu);
}
424 
425 uint_t	cpu_high;		/* Highest arg to CPUID */
426 uint_t	cpu_exthigh;		/* Highest arg to extended CPUID */
427 uint_t	cpu_id;			/* Stepping ID */
428 char	cpu_vendor[20];		/* CPU Origin code */
429 
430 static void
431 vmm_cpuid_init(void)
432 {
433 	uint_t regs[4];
434 
435 	do_cpuid(0, regs);
436 	cpu_high = regs[0];
437 	((uint_t *)&cpu_vendor)[0] = regs[1];
438 	((uint_t *)&cpu_vendor)[1] = regs[3];
439 	((uint_t *)&cpu_vendor)[2] = regs[2];
440 	cpu_vendor[12] = '\0';
441 
442 	do_cpuid(1, regs);
443 	cpu_id = regs[0];
444 
445 	do_cpuid(0x80000000, regs);
446 	cpu_exthigh = regs[0];
447 }
448 
449 /*
450  * FreeBSD uses the struct savefpu for managing the FPU state. That is mimicked
451  * by our hypervisor multiplexor framework structure.
452  */
453 struct savefpu *
454 fpu_save_area_alloc(void)
455 {
456 	return ((struct savefpu *)hma_fpu_alloc(KM_SLEEP));
457 }
458 
459 void
460 fpu_save_area_free(struct savefpu *fsa)
461 {
462 	hma_fpu_t *fpu = (hma_fpu_t *)fsa;
463 	hma_fpu_free(fpu);
464 }
465 
466 void
467 fpu_save_area_reset(struct savefpu *fsa)
468 {
469 	hma_fpu_t *fpu = (hma_fpu_t *)fsa;
470 	hma_fpu_init(fpu);
471 }
472 
473 /*
474  * This glue function is supposed to save the host's FPU state. This is always
475  * paired in the general bhyve code with a call to fpusave. Therefore, we treat
476  * this as a nop and do all the work in fpusave(), which will have the context
477  * argument that we want anyways.
478  */
479 void
480 fpuexit(kthread_t *td)
481 {
482 }
483 
484 /*
485  * This glue function is supposed to restore the guest's FPU state from the save
486  * area back to the host. In FreeBSD, it is assumed that the host state has
487  * already been saved by a call to fpuexit(); however, we do both here.
488  */
489 void
490 fpurestore(void *arg)
491 {
492 	hma_fpu_t *fpu = arg;
493 
494 	hma_fpu_start_guest(fpu);
495 }
496 
497 /*
498  * This glue function is supposed to save the guest's FPU state. The host's FPU
499  * state is not expected to be restored necessarily due to the use of FPU
500  * emulation through CR0.TS. However, we can and do restore it here.
501  */
502 void
503 fpusave(void *arg)
504 {
505 	hma_fpu_t *fpu = arg;
506 
507 	hma_fpu_stop_guest(fpu);
508 }
509 
/* Module-load initialization for this glue layer. */
void
vmm_sol_glue_init(void)
{
	vmm_alloc_init();
	vmm_cpuid_init();
}

/* Module-unload cleanup: verify and destroy the allocation hash. */
void
vmm_sol_glue_cleanup(void)
{
	vmm_alloc_cleanup();
}
522 
523 
524 /* From FreeBSD's sys/kern/subr_clock.c */
525 
526 /*-
527  * Copyright (c) 1988 University of Utah.
528  * Copyright (c) 1982, 1990, 1993
529  *	The Regents of the University of California.  All rights reserved.
530  *
531  * This code is derived from software contributed to Berkeley by
532  * the Systems Programming Group of the University of Utah Computer
533  * Science Department.
534  *
535  * Redistribution and use in source and binary forms, with or without
536  * modification, are permitted provided that the following conditions
537  * are met:
538  * 1. Redistributions of source code must retain the above copyright
539  *    notice, this list of conditions and the following disclaimer.
540  * 2. Redistributions in binary form must reproduce the above copyright
541  *    notice, this list of conditions and the following disclaimer in the
542  *    documentation and/or other materials provided with the distribution.
543  * 4. Neither the name of the University nor the names of its contributors
544  *    may be used to endorse or promote products derived from this software
545  *    without specific prior written permission.
546  *
547  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
548  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
549  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
550  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
551  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
552  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
553  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
554  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
555  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
556  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
557  * SUCH DAMAGE.
558  *
559  *	from: Utah $Hdr: clock.c 1.18 91/01/21$
560  *	from: @(#)clock.c	8.2 (Berkeley) 1/12/94
561  *	from: NetBSD: clock_subr.c,v 1.6 2001/07/07 17:04:02 thorpej Exp
562  *	and
563  *	from: src/sys/i386/isa/clock.c,v 1.176 2001/09/04
564  */
565 
566 #include <sys/clock.h>
567 
568 /*
569  * Generic routines to convert between a POSIX date
570  * (seconds since 1/1/1970) and yr/mo/day/hr/min/sec
571  * Derived from NetBSD arch/hp300/hp300/clock.c
572  */
573 
#define	FEBRUARY	2
#define	days_in_year(y)		(leapyear(y) ? 366 : 365)
/* February gets its extra day in leap years. */
#define	days_in_month(y, m) \
	(month_days[(m) - 1] + (m == FEBRUARY ? leapyear(y) : 0))
/* Day of week. Days are counted from 1/1/1970, which was a Thursday */
#define	day_of_week(days)	(((days) + 4) % 7)

/* Days in each month of a non-leap year, January first. */
static const int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
584 
585 
586 /*
587  * This inline avoids some unnecessary modulo operations
588  * as compared with the usual macro:
589  *   ( ((year % 4) == 0 &&
590  *      (year % 100) != 0) ||
591  *     ((year % 400) == 0) )
592  * It is otherwise equivalent.
593  */
594 static int
595 leapyear(int year)
596 {
597 	int rv = 0;
598 
599 	if ((year & 3) == 0) {
600 		rv = 1;
601 		if ((year % 100) == 0) {
602 			rv = 0;
603 			if ((year % 400) == 0)
604 				rv = 1;
605 		}
606 	}
607 	return (rv);
608 }
609 
/*
 * Convert broken-down calendar time `ct` into a POSIX timespec `ts`.
 * Returns 0 on success, or EINVAL when any field of `ct` is out of range
 * (or the result would overflow a 32-bit time_t).
 */
int
clock_ct_to_ts(struct clocktime *ct, struct timespec *ts)
{
	int i, year, days;

	year = ct->year;

#ifdef __FreeBSD__
	if (ct_debug) {
		printf("ct_to_ts(");
		print_ct(ct);
		printf(")");
	}
#endif

	/* Sanity checks. */
	if (ct->mon < 1 || ct->mon > 12 || ct->day < 1 ||
	    ct->day > days_in_month(year, ct->mon) ||
	    ct->hour > 23 || ct->min > 59 || ct->sec > 59 ||
	    (sizeof (time_t) == 4 && year > 2037)) {	/* time_t overflow */
#ifdef __FreeBSD__
		if (ct_debug)
			printf(" = EINVAL\n");
#endif
		return (EINVAL);
	}

	/*
	 * Compute days since start of time
	 * First from years, then from months.
	 */
	days = 0;
	for (i = POSIX_BASE_YEAR; i < year; i++)
		days += days_in_year(i);

	/* Months */
	for (i = 1; i < ct->mon; i++)
		days += days_in_month(year, i);
	days += (ct->day - 1);

	/* Fold days, hours, and minutes into the final seconds count. */
	ts->tv_sec = (((time_t)days * 24 + ct->hour) * 60 + ct->min) * 60 +
	    ct->sec;
	ts->tv_nsec = ct->nsec;

#ifdef __FreeBSD__
	if (ct_debug)
		printf(" = %ld.%09ld\n", (long)ts->tv_sec, (long)ts->tv_nsec);
#endif
	return (0);
}
660 
/*
 * Convert a POSIX timespec `ts` into broken-down calendar time in `ct`,
 * including the day-of-week field.
 */
void
clock_ts_to_ct(struct timespec *ts, struct clocktime *ct)
{
	int i, year, days;
	time_t rsec;	/* remainder seconds */
	time_t secs;

	secs = ts->tv_sec;
	days = secs / SECDAY;
	rsec = secs % SECDAY;

	ct->dow = day_of_week(days);

	/* Subtract out whole years, counting them in year. */
	for (year = POSIX_BASE_YEAR; days >= days_in_year(year); year++)
		days -= days_in_year(year);
	ct->year = year;

	/* Subtract out whole months, counting them in i. */
	for (i = 1; days >= days_in_month(year, i); i++)
		days -= days_in_month(year, i);
	ct->mon = i;

	/* Days are what is left over (+1) from all that. */
	ct->day = days + 1;

	/* Hours, minutes, seconds are easy */
	ct->hour = rsec / 3600;
	rsec = rsec % 3600;
	ct->min  = rsec / 60;
	rsec = rsec % 60;
	ct->sec  = rsec;
	ct->nsec = ts->tv_nsec;
#ifdef __FreeBSD__
	if (ct_debug) {
		printf("ts_to_ct(%ld.%09ld) = ",
		    (long)ts->tv_sec, (long)ts->tv_nsec);
		print_ct(ct);
		printf("\n");
	}
#endif
}
703