/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014, 2016 by Delphix. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disp.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/cpuvar.h>
#include <sys/psm_defs.h>
#include <sys/clock.h>
#include <sys/atomic.h>
#include <sys/lockstat.h>
#include <sys/smp_impldefs.h>
#include <sys/dtrace.h>
#include <sys/time.h>
#include <sys/panic.h>
#include <sys/cpu.h>
#include <sys/sdt.h>

/*
 * Using the Pentium's TSC register for gethrtime()
 * ------------------------------------------------
 *
 * The Pentium family, like many chip architectures, has a high-resolution
 * timestamp counter ("TSC") which increments once per CPU cycle.  The contents
 * of the timestamp counter are read with the RDTSC instruction.
 *
 * As with its UltraSPARC equivalent (the %tick register), TSC's cycle count
 * must be translated into nanoseconds in order to implement gethrtime().
 * We avoid inducing floating point operations in this conversion by
 * implementing the same nsec_scale algorithm as that found in the sun4u
 * platform code.  The sun4u NATIVE_TIME_TO_NSEC_SCALE block comment contains
 * a detailed description of the algorithm; the comment is not reproduced
 * here.  This implementation differs only in its value for NSEC_SHIFT:
 * we implement an NSEC_SHIFT of 5 (instead of sun4u's 4) to allow for
 * 60 MHz Pentiums.
 *
 * While TSC and %tick are both cycle counting registers, TSC's functionality
 * falls short in several critical ways:
 *
 *  (a)	TSCs on different CPUs are not guaranteed to be in sync.  While in
 *	practice they often _are_ in sync, this isn't guaranteed by the
 *	architecture.
 *
 *  (b)	The TSC cannot be reliably set to an arbitrary value.  The architecture
 *	only supports writing the low 32-bits of TSC, making it impractical
 *	to rewrite.
 *
 *  (c)	The architecture doesn't have the capacity to interrupt based on
 *	arbitrary values of TSC; there is no TICK_CMPR equivalent.
 *
 * Together, (a) and (b) imply that software must track the skew between
 * TSCs and account for it (it is assumed that while there may exist skew,
 * there does not exist drift).  To determine the skew between CPUs, we
 * have newly onlined CPUs call tsc_sync_slave(), while the CPU performing
 * the online operation calls tsc_sync_master().
 *
 * In the absence of time-of-day clock adjustments, gethrtime() must stay in
 * sync with gettimeofday().  This is problematic; given (c), the software
 * cannot drive its time-of-day source from TSC, and yet they must somehow be
 * kept in sync.  We implement this by having a routine, tsc_tick(), which
 * is called once per second from the interrupt which drives time-of-day.
 *
 * Note that the hrtime base for gethrtime, tsc_hrtime_base, is modified
 * atomically with nsec_scale under CLOCK_LOCK.  This assures that time
 * monotonically increases.
 */
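
/*
 * For illustration: with an NSEC_SHIFT of 5, tsc_hrtimeinit() below computes
 *
 *	nsec_scale = (NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz
 *
 * and the TSC_CONVERT* macros below effectively compute
 *
 *	hrt = (tsc * nsec_scale) >> (32 - NSEC_SHIFT)
 *
 * in 32x32->64 bit pieces.  For a hypothetical 1 GHz CPU, nsec_scale is
 * (10^9 << 27) / 10^9 = 2^27, and the conversion collapses to hrt = tsc:
 * one nanosecond per tick.  nsec_scale must fit in 32 bits, which requires
 * cpu_freq_hz > NANOSEC / (1 << NSEC_SHIFT) = 31.25 MHz; this is the bound
 * asserted in tsc_hrtimeinit().
 */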

#define	NSEC_SHIFT	5

static uint_t nsec_scale;
static uint_t nsec_unscale;

/*
 * These two variables used to be grouped together inside of a structure that
 * lived on a single cache line.  A regression (bug ID 4623398) caused the
 * compiler to emit code that "optimized" away the while-loops below.  The
 * result was that no synchronization between the onlining and onlined CPUs
 * took place.
 */
static volatile int tsc_ready;
static volatile int tsc_sync_go;

/*
 * Used as indices into the tsc_sync_snaps[] array.
 */
#define	TSC_MASTER		0
#define	TSC_SLAVE		1

/*
 * Used in the tsc_sync_master()/tsc_sync_slave() rendezvous.
 */
#define	TSC_SYNC_STOP		1
#define	TSC_SYNC_GO		2
#define	TSC_SYNC_DONE		3
#define	SYNC_ITERATIONS		10

#define	TSC_CONVERT_AND_ADD(tsc, hrt, scale) {			\
	unsigned int *_l = (unsigned int *)&(tsc);		\
	(hrt) += mul32(_l[1], scale) << NSEC_SHIFT;		\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT);	\
}

#define	TSC_CONVERT(tsc, hrt, scale) {				\
	unsigned int *_l = (unsigned int *)&(tsc);		\
	(hrt) = mul32(_l[1], scale) << NSEC_SHIFT;		\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT);	\
}

int tsc_master_slave_sync_needed = 1;

static int	tsc_max_delta;
static hrtime_t tsc_sync_tick_delta[NCPU];
typedef struct tsc_sync {
	volatile hrtime_t master_tsc, slave_tsc;
} tsc_sync_t;
static tsc_sync_t *tscp;
static hrtime_t largest_tsc_delta = 0;
static ulong_t shortest_write_time = ~0UL;

static hrtime_t	tsc_last = 0;
static hrtime_t	tsc_last_jumped = 0;
static hrtime_t	tsc_hrtime_base = 0;
static int	tsc_jumped = 0;
static uint32_t	tsc_wayback = 0;
/*
 * The cap of 1 second was chosen because it is the frequency at which the
 * tsc_tick() function runs; that means that when gethrtime() is called, it
 * should never have been more than 1 second since tsc_last was updated.
 */
static hrtime_t tsc_resume_cap;
static hrtime_t tsc_resume_cap_ns = NANOSEC;	/* 1s */

static hrtime_t	shadow_tsc_hrtime_base;
static hrtime_t	shadow_tsc_last;
static uint_t	shadow_nsec_scale;
static uint32_t	shadow_hres_lock;
int get_tsc_ready();

static inline hrtime_t
tsc_protect(hrtime_t a)
{
	if (a > tsc_resume_cap) {
		atomic_inc_32(&tsc_wayback);
		DTRACE_PROBE3(tsc__wayback, hrtime_t, a, hrtime_t, tsc_last,
		    uint32_t, tsc_wayback);
		return (tsc_resume_cap);
	}
	return (a);
}
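
/*
 * A note on the lockless read protocol used by the gethrtime variants
 * below (a summary of the behavior implied by this code, not a normative
 * description): hres_lock behaves, in effect, like a generation counter
 * whose low-order byte doubles as the clock lock.  Its value is odd while
 * tsc_tick() holds CLOCK_LOCK, and the unlock path's increment both
 * releases the lock and bumps the generation.  A reader therefore
 * snapshots hres_lock, computes its answer, and retries whenever
 * (old_hres_lock & ~1) != hres_lock -- that is, whenever the lock was held
 * at the time of the snapshot or the generation has changed since.
 */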

hrtime_t
tsc_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;

	do {
		old_hres_lock = hres_lock;

		if ((tsc = tsc_read()) >= tsc_last) {
			/*
			 * It would seem to be obvious that this is true
			 * (that is, the past is less than the present),
			 * but it isn't true in the presence of suspend/resume
			 * cycles.  If we manage to call gethrtime()
			 * after a resume, but before the first call to
			 * tsc_tick(), we will see the jump.  In this case,
			 * we will simply use the value in TSC as the delta.
			 */
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2 * tsc_max_delta) {
			/*
			 * There is a chance that tsc_tick() has just run on
			 * another CPU, and we have drifted just enough so that
			 * we appear behind tsc_last.  In this case, force the
			 * delta to be zero.
			 */
			tsc = 0;
		} else {
			/*
			 * If we reach this else clause we assume that we have
			 * gone through a suspend/resume cycle and use the
			 * current tsc value as the delta.
			 *
			 * In rare cases we can reach this else clause due to
			 * a lack of monotonicity in the TSC value.  In such
			 * cases using the current TSC value as the delta would
			 * cause us to return a value ~2x of what it should
			 * be.  To protect against these cases we cap the
			 * suspend/resume delta at tsc_resume_cap.
			 */
			tsc = tsc_protect(tsc);
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}

hrtime_t
tsc_gethrtime_delta(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * We need to disable interrupts here to assure that we
		 * don't migrate between the call to tsc_read() and
		 * adding the CPU's TSC tick delta.  Note that disabling
		 * and reenabling preemption is forbidden here because
		 * we may be in the middle of a fast trap.  In the amd64
		 * kernel we cannot tolerate preemption during a fast
		 * trap.  See _update_sregs().
		 */

		flags = clear_int_flag();
		tsc = tsc_read() + tsc_sync_tick_delta[CPU->cpu_id];
		restore_int_flag(flags);

		/* See comments in tsc_gethrtime() above */

		if (tsc >= tsc_last) {
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2 * tsc_max_delta) {
			tsc = 0;
		} else {
			tsc = tsc_protect(tsc);
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}
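
/*
 * Return the current CPU's TSC sync delta, in unscaled TSC ticks.
 * Interrupts are disabled so that the thread cannot migrate between
 * fetching CPU->cpu_id and indexing tsc_sync_tick_delta[].
 */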
hrtime_t
tsc_gethrtime_tick_delta(void)
{
	hrtime_t hrt;
	ulong_t flags;

	flags = clear_int_flag();
	hrt = tsc_sync_tick_delta[CPU->cpu_id];
	restore_int_flag(flags);

	return (hrt);
}

/*
 * This is similar to the above, but it cannot actually spin on hres_lock.
 * As a result, it caches all of the variables it needs; if the variables
 * don't change, it's done.
 */
hrtime_t
dtrace_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * Interrupts are disabled to ensure that the thread isn't
		 * migrated between the tsc_read() and adding the CPU's
		 * TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();

		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= tsc_last)
			tsc -= tsc_last;
		else if (tsc >= tsc_last - 2 * tsc_max_delta)
			tsc = 0;
		else
			tsc = tsc_protect(tsc);

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);

		if ((old_hres_lock & ~1) == hres_lock)
			break;

		/*
		 * If we're here, the clock lock is locked -- or it has been
		 * unlocked and locked since we looked.  This may be due to
		 * tsc_tick() running on another CPU -- or it may be because
		 * some code path has ended up in dtrace_probe() with
		 * CLOCK_LOCK held.  We'll try to determine that we're in
		 * the former case by taking another lap if the lock has
		 * changed since when we first looked at it.
		 */
		if (old_hres_lock != hres_lock)
			continue;

		/*
		 * So the lock was and is locked.  We'll use the old data
		 * instead.
		 */
		old_hres_lock = shadow_hres_lock;

		/*
		 * Again, disable interrupts to ensure that the thread
		 * isn't migrated between the tsc_read() and adding
		 * the CPU's TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();

		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= shadow_tsc_last)
			tsc -= shadow_tsc_last;
		else if (tsc >= shadow_tsc_last - 2 * tsc_max_delta)
			tsc = 0;
		else
			tsc = tsc_protect(tsc);

		hrt = shadow_tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, shadow_nsec_scale);
	} while ((old_hres_lock & ~1) != shadow_hres_lock);

	return (hrt);
}
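
/*
 * Return the raw TSC reading plus tsc_last_jumped, so that the unscaled
 * timebase also keeps moving forward across the suspend/resume jumps
 * handled in tsc_tick().
 */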
hrtime_t
tsc_gethrtimeunscaled(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc;

	do {
		old_hres_lock = hres_lock;

		/* See tsc_tick(). */
		tsc = tsc_read() + tsc_last_jumped;
	} while ((old_hres_lock & ~1) != hres_lock);

	return (tsc);
}

/*
 * Convert a nanosecond-based timestamp to TSC ticks.
 */
uint64_t
tsc_unscalehrtime(hrtime_t nsec)
{
	hrtime_t tsc;

	if (tsc_gethrtime_enable) {
		TSC_CONVERT(nsec, tsc, nsec_unscale);
		return (tsc);
	}
	return ((uint64_t)nsec);
}

/* Convert a TSC timestamp to nanoseconds. */
void
tsc_scalehrtime(hrtime_t *tsc)
{
	hrtime_t hrt;
	hrtime_t mytsc;

	if (tsc == NULL)
		return;
	mytsc = *tsc;

	TSC_CONVERT(mytsc, hrt, nsec_scale);
	*tsc = hrt;
}
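
/*
 * A note on precision (an observation about this code, not a guarantee
 * from its original authors): nsec_scale and nsec_unscale are 32-bit
 * truncations of the ideal ratios, so tsc_scalehrtime() and
 * tsc_unscalehrtime() are only approximate inverses of one another; a
 * round trip through both can be off by a small number of units.
 */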

hrtime_t
tsc_gethrtimeunscaled_delta(void)
{
	hrtime_t hrt;
	ulong_t flags;

	/*
	 * Similarly to tsc_gethrtime_delta, we need to disable interrupts
	 * to prevent migration between the call to tsc_gethrtimeunscaled
	 * and adding the CPU's hrtime delta.  Note that disabling and
	 * reenabling preemption is forbidden here because we may be in the
	 * middle of a fast trap.  In the amd64 kernel we cannot tolerate
	 * preemption during a fast trap.  See _update_sregs().
	 */

	flags = clear_int_flag();
	hrt = tsc_gethrtimeunscaled() + tsc_sync_tick_delta[CPU->cpu_id];
	restore_int_flag(flags);

	return (hrt);
}

/*
 * Called by the master in the TSC sync operation (usually the boot CPU).
 * If the slave is discovered to have a skew, gethrtimef will be changed to
 * point to tsc_gethrtime_delta().  Calculating skews is precise only when
 * the master and slave TSCs are read simultaneously; however, there is no
 * algorithm that can read both CPUs in perfect simultaneity.  The proposed
 * algorithm is an approximate method based on the behaviour of cache
 * management.  The slave CPU continuously reads TSC and then reads a global
 * variable which the master CPU updates.  The moment the master's update
 * becomes visible to the slave (being forced by an mfence operation), we use
 * the TSC reading taken on the slave.  A corresponding TSC read will be
 * taken on the master as soon as possible after finishing the mfence
 * operation.  But the delay between causing the slave to notice the invalid
 * cache line and the completion of the mfence is not repeatable.  This error
 * is heuristically assumed to be 1/4th of the total write time as measured
 * by the two TSC reads on the master sandwiching the mfence.  Furthermore,
 * due to the nature of bus arbitration, contention on the memory bus, etc.,
 * the time taken for the write to become globally visible can vary a lot.
 * So instead of taking a single reading, a set of readings is taken and the
 * one with the least write time is chosen to calculate the final skew.
 *
 * TSC sync is disabled in the context of virtualization because the CPUs
 * assigned to the guest are virtual CPUs, which means the real CPUs on which
 * the guest runs keep changing during the lifetime of the guest OS.  So we
 * would end up calculating TSC skews for a set of CPUs during boot, whereas
 * the guest might migrate to a different set of physical CPUs at a later
 * point in time.
 */
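
/*
 * One rendezvous iteration, as implemented below (master actions on the
 * left, slave actions on the right):
 *
 *	wait for TSC_SYNC_GO		read master_tsc (warm the line);
 *					tsc_sync_go = TSC_SYNC_GO
 *	master_tsc = tsc_read();
 *	mfence;
 *	mtsc_after = tsc_read()		spin reading TSC until master_tsc
 *					is nonzero; record the last read in
 *					slave_tsc;
 *					tsc_sync_go = TSC_SYNC_DONE
 *	wait for TSC_SYNC_DONE;
 *	compute write_time and tdelta;
 *	clear the line;
 *	tsc_sync_go = TSC_SYNC_STOP
 *
 * With hypothetical numbers: if the minimum observed write time is 40 ticks
 * and slave_tsc - mtsc_after = 70, the heuristic applies (70 > 40/4), so
 * tdelta = slave_tsc - (mtsc_after - 10) = 80; the slave is treated as
 * running 80 ticks ahead, and tsc_sync_tick_delta[slave] is set 80 ticks
 * below the master's delta.
 */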
void
tsc_sync_master(processorid_t slave)
{
	ulong_t flags, source, min_write_time = ~0UL;
	hrtime_t write_time, x, mtsc_after, tdelta;
	tsc_sync_t *tsc = tscp;
	int cnt;
	int hwtype;

	hwtype = get_hwenv();
	if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
		return;

	flags = clear_int_flag();
	source = CPU->cpu_id;

	for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
		while (tsc_sync_go != TSC_SYNC_GO)
			SMT_PAUSE();

		tsc->master_tsc = tsc_read();
		membar_enter();
		mtsc_after = tsc_read();
		while (tsc_sync_go != TSC_SYNC_DONE)
			SMT_PAUSE();
		write_time = mtsc_after - tsc->master_tsc;
		if (write_time <= min_write_time) {
			min_write_time = write_time;
			/*
			 * Apply the heuristic adjustment only if the
			 * calculated delta is > 1/4th of the write time.
			 */
			x = tsc->slave_tsc - mtsc_after;
			if (x < 0)
				x = -x;
			if (x > (min_write_time / 4))
				/*
				 * Subtract 1/4th of the measured write time
				 * from the master's TSC value, as an estimate
				 * of how late the mfence completion came
				 * after the slave noticed the cache line
				 * change.
				 */
				tdelta = tsc->slave_tsc -
				    (mtsc_after - (min_write_time / 4));
			else
				tdelta = tsc->slave_tsc - mtsc_after;
			tsc_sync_tick_delta[slave] =
			    tsc_sync_tick_delta[source] - tdelta;
		}

		tsc->master_tsc = tsc->slave_tsc = write_time = 0;
		membar_enter();
		tsc_sync_go = TSC_SYNC_STOP;
	}
	if (tdelta < 0)
		tdelta = -tdelta;
	if (tdelta > largest_tsc_delta)
		largest_tsc_delta = tdelta;
	if (min_write_time < shortest_write_time)
		shortest_write_time = min_write_time;
	/*
	 * Enable the delta variants of the tsc functions if the largest of
	 * all chosen deltas is greater than the shortest of the measured
	 * write times.
	 */
	if (largest_tsc_delta > shortest_write_time) {
		gethrtimef = tsc_gethrtime_delta;
		gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
	}
	restore_int_flag(flags);
}

/*
 * Called by a CPU which has just been onlined.  It is expected that the CPU
 * performing the online operation will call tsc_sync_master().
 *
 * TSC sync is disabled in the context of virtualization.  See the comments
 * above tsc_sync_master().
 */
void
tsc_sync_slave(void)
{
	ulong_t flags;
	hrtime_t s1;
	tsc_sync_t *tsc = tscp;
	int cnt;
	int hwtype;

	hwtype = get_hwenv();
	if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
		return;

	flags = clear_int_flag();

	for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
		/* Re-fill the cache line */
		s1 = tsc->master_tsc;
		membar_enter();
		tsc_sync_go = TSC_SYNC_GO;
		do {
			/*
			 * Do not put an SMT_PAUSE here.  For instance,
			 * if the master and slave are really the same
			 * hyper-threaded CPU, then you want the master
			 * to yield to the slave as quickly as possible
			 * here, but not the other way around.
			 */
			s1 = tsc_read();
		} while (tsc->master_tsc == 0);
		tsc->slave_tsc = s1;
		membar_enter();
		tsc_sync_go = TSC_SYNC_DONE;

		while (tsc_sync_go != TSC_SYNC_STOP)
			SMT_PAUSE();
	}

	restore_int_flag(flags);
}

/*
 * Called once per second on a CPU from the cyclic subsystem's
 * CY_HIGH_LEVEL interrupt.  (This is no longer restricted to cpu0.)
 */
void
tsc_tick(void)
{
	hrtime_t now, delta;
	ushort_t spl;

	/*
	 * Before we set the new variables, we set the shadow values.  This
	 * allows for lock free operation in dtrace_gethrtime().
	 */
	lock_set_spl((lock_t *)&shadow_hres_lock + HRES_LOCK_OFFSET,
	    ipltospl(CBE_HIGH_PIL), &spl);

	shadow_tsc_hrtime_base = tsc_hrtime_base;
	shadow_tsc_last = tsc_last;
	shadow_nsec_scale = nsec_scale;

	shadow_hres_lock++;
	splx(spl);

	CLOCK_LOCK(&spl);

	now = tsc_read();

	if (gethrtimef == tsc_gethrtime_delta)
		now += tsc_sync_tick_delta[CPU->cpu_id];

	if (now < tsc_last) {
		/*
		 * The TSC has just jumped into the past.  We assume that
		 * this is due to a suspend/resume cycle, and we're going
		 * to use the _current_ value of TSC as the delta.  This
		 * will keep tsc_hrtime_base correct.  We're also going to
		 * assume that the rate of the TSC does not change after a
		 * suspend/resume cycle (i.e., nsec_scale remains the same).
		 */
		delta = now;
		delta = tsc_protect(delta);
		tsc_last_jumped += tsc_last;
		tsc_jumped = 1;
	} else {
		/*
		 * Determine the number of TSC ticks since the last clock
		 * tick, and add that to the hrtime base.
		 */
		delta = now - tsc_last;
	}

	TSC_CONVERT_AND_ADD(delta, tsc_hrtime_base, nsec_scale);
	tsc_last = now;

	CLOCK_UNLOCK(spl);
}

void
tsc_hrtimeinit(uint64_t cpu_freq_hz)
{
	extern int gethrtime_hires;
	longlong_t tsc;
	ulong_t flags;

	/*
	 * cpu_freq_hz is the measured cpu frequency in hertz.
	 */

	/*
	 * We can't accommodate CPUs slower than 31.25 MHz.
	 */
	ASSERT(cpu_freq_hz > NANOSEC / (1 << NSEC_SHIFT));
	nsec_scale =
	    (uint_t)(((uint64_t)NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz);
	nsec_unscale =
	    (uint_t)(((uint64_t)cpu_freq_hz << (32 - NSEC_SHIFT)) / NANOSEC);

	flags = clear_int_flag();
	tsc = tsc_read();
	(void) tsc_gethrtime();
	tsc_max_delta = tsc_read() - tsc;
	restore_int_flag(flags);
	gethrtimef = tsc_gethrtime;
	gethrtimeunscaledf = tsc_gethrtimeunscaled;
	scalehrtimef = tsc_scalehrtime;
	unscalehrtimef = tsc_unscalehrtime;
	hrtime_tick = tsc_tick;
	gethrtime_hires = 1;
	/*
	 * Allocate memory for the structure used in the tsc sync logic.
	 * This structure should be aligned on a multiple of the cache line
	 * size.
	 */
	tscp = kmem_zalloc(PAGESIZE, KM_SLEEP);

	/*
	 * Convert the TSC resume cap ns value into its unscaled TSC value.
	 * See tsc_gethrtime().
	 */
	if (tsc_resume_cap == 0)
		TSC_CONVERT(tsc_resume_cap_ns, tsc_resume_cap, nsec_unscale);
}

int
get_tsc_ready()
{
	return (tsc_ready);
}

/*
 * Adjust all the deltas by adding the passed value to the array.
 * Then use the "delta" versions of the gethrtime functions.
 * Note that 'tdelta' _could_ be a negative number, which should
 * reduce the values in the array (used, for example, if the Solaris
 * instance was moved by a virtual machine manager to a machine with a
 * higher value of tsc).
 */
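/*
 * For illustration, with hypothetical numbers: if a running instance is
 * migrated to a host whose TSC reads 1000000 ticks higher at the moment
 * of the move, calling tsc_adjust_delta(-1000000) lowers every per-CPU
 * delta so that tsc_read() + tsc_sync_tick_delta[CPU->cpu_id] remains
 * continuous across the migration.
 */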
void
tsc_adjust_delta(hrtime_t tdelta)
{
	int		i;

	for (i = 0; i < NCPU; i++) {
		tsc_sync_tick_delta[i] += tdelta;
	}

	gethrtimef = tsc_gethrtime_delta;
	gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
}

/*
 * Functions to manage TSC and high-res time on suspend and resume.
 */

/*
 * Declarations needed for time adjustment.
 */
extern void	rtcsync(void);
extern tod_ops_t *tod_ops;
/* There must be a better way than exposing nsec_scale! */
extern uint_t	nsec_scale;
static uint64_t tsc_saved_tsc = 0; /* 1 in 2^64 chance this'll screw up! */
static timestruc_t tsc_saved_ts;
static int	tsc_needs_resume = 0;	/* We only want to do this once. */
int	tsc_delta_onsuspend = 0;
int	tsc_adjust_seconds = 1;
int	tsc_suspend_count = 0;
int	tsc_resume_in_cyclic = 0;

/*
 * Let timestamp.c know that we are suspending.  It needs to take
 * snapshots of the current time, and do any pre-suspend work.
 */
void
tsc_suspend(void)
{
	/*
	 * What we need to do here is to get the time we suspended, so that
	 * we know how much we should add at resume time.
	 * This routine is called by each CPU, so we need to handle reentry.
	 */
	if (tsc_gethrtime_enable) {
		/*
		 * We put the tsc_read() inside the lock as it
		 * has no locking constraints, and it puts the
		 * acquired value closer to the time stamp (in
		 * case we delay getting the lock).
		 */
		mutex_enter(&tod_lock);
		tsc_saved_tsc = tsc_read();
		tsc_saved_ts = TODOP_GET(tod_ops);
		mutex_exit(&tod_lock);
		/* We only want to do this once. */
		if (tsc_needs_resume == 0) {
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(tsc_saved_tsc);
			} else {
				tsc_adjust_delta(nsec_scale);
			}
			tsc_suspend_count++;
		}
	}

	invalidate_cache();
	tsc_needs_resume = 1;
}
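
/*
 * A worked example of the adjustment performed in tsc_resume(), with
 * hypothetical numbers (and ignoring any pre-existing per-CPU deltas):
 * with cpu_freq_hz of 1 GHz and a 30 second sleep, sleep_tsc starts at
 * 3 * 10^10.  After 'sleep_tsc -= now', the value passed to
 * tsc_adjust_delta() is tsc_saved_tsc + 3 * 10^10 - now, so the effective
 * counter seen by the delta variants, tsc_read() + tsc_sync_tick_delta[],
 * resumes from roughly tsc_saved_tsc + 3 * 10^10 -- as if the TSC had
 * kept ticking through the sleep.
 */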
/*
 * Restore all timestamp state based on the snapshots taken at
 * suspend time.
 */
void
tsc_resume(void)
{
	/*
	 * We only need to (and want to) do this once.  So let the first
	 * caller handle this (we are locked by the cpu lock), as it
	 * is preferable that we get the earliest sync.
	 */
	if (tsc_needs_resume) {
		/*
		 * If using the TSC, adjust the delta based on how long
		 * we were sleeping (or away).  We also adjust for
		 * migration and a grown TSC.
		 */
		if (tsc_saved_tsc != 0) {
			timestruc_t	ts;
			hrtime_t	now, sleep_tsc = 0;
			int		sleep_sec;
			extern void	tsc_tick(void);
			extern uint64_t cpu_freq_hz;

			/* tsc_read() MUST be before TODOP_GET() */
			mutex_enter(&tod_lock);
			now = tsc_read();
			ts = TODOP_GET(tod_ops);
			mutex_exit(&tod_lock);

			/* Compute seconds of sleep time */
			sleep_sec = ts.tv_sec - tsc_saved_ts.tv_sec;

			/*
			 * If the current TOD is not later than the one we
			 * saved at suspend time, then there is likely a
			 * problem with the clock.  Assume at least one
			 * second has passed, so that time goes forward.
			 */
			if (sleep_sec <= 0) {
				sleep_sec = 1;
			}

			/*
			 * How many TSC ticks should have occurred while
			 * sleeping?
			 */
			if (tsc_adjust_seconds)
				sleep_tsc = sleep_sec * cpu_freq_hz;

			/*
			 * We also want to subtract from the "sleep_tsc"
			 * the current value of tsc_read(), so that our
			 * adjustment accounts for the amount of time we
			 * have been resumed _or_ an adjustment based on
			 * the fact that we didn't actually power off the
			 * CPU (migration is another issue, but _should_
			 * also comply with this calculation).  If the CPU
			 * never powered off, then:
			 *	'now == sleep_tsc + saved_tsc'
			 * and the delta will effectively be "0".
			 */
			sleep_tsc -= now;
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(sleep_tsc);
			} else {
				tsc_adjust_delta(tsc_saved_tsc + sleep_tsc);
			}
			tsc_saved_tsc = 0;

			tsc_tick();
		}
		tsc_needs_resume = 0;
	}
}