/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/comm_page.h>
#include <sys/tsc.h>


/*
 * Interrogate if querying the clock via the comm page is possible.
 */
int
__cp_can_gettime(comm_page_t *cp)
{
	switch (cp->cp_tsc_type) {
	case TSC_TSCP:
	case TSC_RDTSC_MFENCE:
	case TSC_RDTSC_LFENCE:
	case TSC_RDTSC_CPUID:
		return (1);
	default:
		break;
	}
	return (0);
}

#ifdef __amd64

/*
 * The functions used for calculating time (both monotonic and wall-clock) are
 * implemented in assembly on amd64.  This is primarily for stack conservation.
 */

#else /* i386 below */

/*
 * ASM-defined functions: __cp_tsc_read() samples the TSC via the method
 * indicated by cp_tsc_type, returning 0 if it cannot obtain a usable reading,
 * and __cp_gethrtime_fasttrap() fetches an hrtime value from the kernel via
 * the gethrtime fast trap.
 */
extern hrtime_t __cp_tsc_read(comm_page_t *);
extern hrtime_t __cp_gethrtime_fasttrap(void);

/*
 * These are cloned from the TSC and time-related code in the kernel.  They
 * must be kept in sync should the source values change.
 */
#define	NSEC_SHIFT	5
#define	ADJ_SHIFT	4
#define	NANOSEC		1000000000LL

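/*
 * TSC_CONVERT_AND_ADD() computes hrt += (tsc * scale) >> (32 - NSEC_SHIFT)
 * using the two 32-bit halves of the TSC delta so the intermediate products
 * stay within 64 bits.  Per the comments in uts/i86pc/os/timestamp.c,
 * cp_nsec_scale is roughly (NANOSEC << (32 - NSEC_SHIFT)) / tsc_frequency,
 * making the value added to hrt the TSC delta expressed in nanoseconds.
 */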
#define	TSC_CONVERT_AND_ADD(tsc, hrt, scale) do {		\
	uint32_t *_l = (uint32_t *)&(tsc);			\
	uint64_t sc = (uint32_t)(scale);			\
	(hrt) += (uint64_t)(_l[1] * sc) << NSEC_SHIFT;		\
	(hrt) += (uint64_t)(_l[0] * sc) >> (32 - NSEC_SHIFT);	\
} while (0)

/*
 * Userspace version of tsc_gethrtime.
 * See: uts/i86pc/os/timestamp.c
 */
hrtime_t
__cp_gethrtime(comm_page_t *cp)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt, tsc_last;

	/*
	 * Several precautions must be taken when collecting the data needed
	 * to perform an accurate gethrtime calculation.
	 *
	 * While much of the TSC state stored in the comm page is unchanging
	 * after boot, portions of it are periodically updated during OS ticks.
	 * A change to cp_hres_lock during the course of the copy indicates a
	 * potentially inconsistent snapshot, necessitating a retry loop.
	 *
	 * Even more complicated is the handling of TSCs which require sync
	 * offsets between different CPUs.  Since userspace lacks the luxury of
	 * disabling interrupts, a validation loop checking for CPU migration
	 * is used instead.  Pathological scheduling could, in theory, "outwit"
	 * this check; such a possibility is considered an acceptable risk.
	 */
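	/*
	 * This mirrors the kernel hres_lock protocol: the low bit of
	 * cp_hres_lock is set while an update is in progress, so masking it
	 * off in the pre-copy value forces a mismatch (and a retry) whenever
	 * the snapshot was taken during or across an update.
	 */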
	do {
		old_hres_lock = cp->cp_hres_lock;
		tsc_last = cp->cp_tsc_last;
		hrt = cp->cp_tsc_hrtime_base;
		tsc = __cp_tsc_read(cp);

		/*
		 * A TSC reading of 0 indicates that __cp_tsc_read() took its
		 * error bail-out path.  Rely on the fasttrap to supply an
		 * hrtime value instead.
		 */
		if (tsc == 0) {
			return (__cp_gethrtime_fasttrap());
		}
	} while ((old_hres_lock & ~1) != cp->cp_hres_lock);

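	/*
	 * These branches are cloned from the kernel's tsc_gethrtime(): a
	 * reading slightly behind cp_tsc_last (within twice the measured
	 * cross-CPU delta) is treated as zero elapsed time, while a reading
	 * far behind it is assumed to stem from a TSC reset (such as after a
	 * resume) and is capped at cp_tsc_resume_cap.
	 */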
	if (tsc >= tsc_last) {
		tsc -= tsc_last;
	} else if (tsc >= tsc_last - (2 * cp->cp_tsc_max_delta)) {
		tsc = 0;
	} else if (tsc > cp->cp_tsc_resume_cap) {
		tsc = cp->cp_tsc_resume_cap;
	}
	TSC_CONVERT_AND_ADD(tsc, hrt, cp->cp_nsec_scale);

	return (hrt);
}

/*
 * Userspace version of pc_gethrestime.
 * See: uts/i86pc/os/machdep.c
 */
int
__cp_clock_gettime_realtime(comm_page_t *cp, timespec_t *tsp)
{
	int lock_prev, nslt;
	timespec_t now;
	int64_t hres_adj;

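	/*
	 * Wall-clock time is reconstructed as the hrestime value published by
	 * the kernel at the last tick, plus the high-resolution time elapsed
	 * since that tick, plus a portion of any outstanding adjtime/NTP-style
	 * adjustment recorded in cp_hrestime_adj.
	 */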
loop:
	lock_prev = cp->cp_hres_lock;
	now.tv_sec = cp->cp_hrestime[0];
	now.tv_nsec = cp->cp_hrestime[1];
	nslt = (int)(__cp_gethrtime(cp) - cp->cp_hres_last_tick);
	hres_adj = cp->cp_hrestime_adj;
	if (nslt < 0) {
		/*
		 * A tick came in between sampling hrtime and hres_last_tick.
		 */
		goto loop;
	}
	now.tv_nsec += nslt;

	/*
	 * Apply hres_adj skew, if needed.
	 */
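	/*
	 * As in pc_gethrestime(), at most 1/2^ADJ_SHIFT of the nanoseconds
	 * elapsed since the last tick is applied per call, clamped so the
	 * correction never overshoots the outstanding cp_hrestime_adj value.
	 */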
	if (hres_adj > 0) {
		nslt = (nslt >> ADJ_SHIFT);
		if (nslt > hres_adj)
			nslt = (int)hres_adj;
		now.tv_nsec += nslt;
	} else if (hres_adj < 0) {
		nslt = -(nslt >> ADJ_SHIFT);
		if (nslt < hres_adj)
			nslt = (int)hres_adj;
		now.tv_nsec += nslt;
	}

	/*
	 * Rope in tv_nsec from any excessive adjustments.
	 */
	while ((unsigned long)now.tv_nsec >= NANOSEC) {
		now.tv_nsec -= NANOSEC;
		now.tv_sec++;
	}

	if ((cp->cp_hres_lock & ~1) != lock_prev)
		goto loop;

	*tsp = now;
	return (0);
}

/*
 * The __cp_clock_gettime_monotonic function expects that hrt2ts() be present
 * in the object this code is eventually linked into.  (The amd64 version has
 * no such requirement.)
 */
extern void hrt2ts(hrtime_t, timespec_t *);

int
__cp_clock_gettime_monotonic(comm_page_t *cp, timespec_t *tsp)
{
	hrtime_t hrt;

	hrt = __cp_gethrtime(cp);
	hrt2ts(hrt, tsp);
	return (0);
}

#endif /* __amd64 */