/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disp.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/cpuvar.h>
#include <sys/psm_defs.h>
#include <sys/clock.h>
#include <sys/atomic.h>
#include <sys/lockstat.h>
#include <sys/smp_impldefs.h>
#include <sys/dtrace.h>
#include <sys/time.h>
#include <sys/panic.h>

/*
 * Using the Pentium's TSC register for gethrtime()
 * ------------------------------------------------
 *
 * The Pentium family, like many chip architectures, has a high-resolution
 * timestamp counter ("TSC") which increments once per CPU cycle.  The contents
 * of the timestamp counter are read with the RDTSC instruction.
 *
 * As with its UltraSPARC equivalent (the %tick register), TSC's cycle count
 * must be translated into nanoseconds in order to implement gethrtime().
 * We avoid inducing floating point operations in this conversion by
 * implementing the same nsec_scale algorithm as that found in the sun4u
 * platform code.  The sun4u NATIVE_TIME_TO_NSEC_SCALE block comment contains
 * a detailed description of the algorithm; the comment is not reproduced
 * here.  This implementation differs only in its value for NSEC_SHIFT:
 * we implement an NSEC_SHIFT of 5 (instead of sun4u's 4) to allow for
 * 60 MHz Pentiums.
 *
 * While TSC and %tick are both cycle counting registers, TSC's functionality
 * falls short in several critical ways:
 *
 *  (a)	TSCs on different CPUs are not guaranteed to be in sync.  While in
 *	practice they often _are_ in sync, this isn't guaranteed by the
 *	architecture.
 *
 *  (b)	The TSC cannot be reliably set to an arbitrary value.  The
 *	architecture only supports writing the low 32 bits of TSC, making it
 *	impractical to rewrite.
 *
 *  (c)	The architecture doesn't have the capacity to interrupt based on
 *	arbitrary values of TSC; there is no TICK_CMPR equivalent.
 *
 * Together, (a) and (b) imply that software must track the skew between
 * TSCs and account for it (it is assumed that while there may exist skew,
 * there does not exist drift).  To determine the skew between CPUs, we
 * have newly onlined CPUs call tsc_sync_slave(), while the CPU performing
 * the online operation calls tsc_sync_master().  Once both CPUs are ready,
 * the master sets a shared flag, and each reads its TSC register.  To reduce
 * bias, we then wait until both CPUs are ready again, but this time the
 * slave sets the shared flag, and each reads its TSC register again.  The
 * master compares the average of the two sample values, and, if observable
 * skew is found, changes the gethrtimef function pointer to point to a
 * gethrtime() implementation which will take the discovered skew into
 * consideration.
 *
 * In the absence of time-of-day clock adjustments, gethrtime() must stay in
 * sync with gettimeofday().  This is problematic; given (c), the software
 * cannot drive its time-of-day source from TSC, and yet they must somehow be
 * kept in sync.  We implement this by having a routine, tsc_tick(), which
 * is called once per second from the interrupt which drives time-of-day.
 * tsc_tick() recalculates nsec_scale based on the number of CPU cycles
 * since boot versus the number of seconds since boot.  This algorithm
 * becomes more accurate over time and converges quickly; the error in
 * nsec_scale is typically under 1 ppm within 10 seconds of boot, and is
 * less than 100 ppb one minute after boot.
 *
 * Note that the hrtime base for gethrtime, tsc_hrtime_base, is modified
 * atomically with nsec_scale under CLOCK_LOCK.  This ensures that time
 * monotonically increases.
 */

#define	NSEC_SHIFT 5

static uint_t nsec_scale;

/*
 * These two variables used to be grouped together inside of a structure that
 * lived on a single cache line.  A regression (bug ID 4623398) caused the
 * compiler to emit code that "optimized" away the while-loops below.  The
 * result was that no synchronization between the onlining and onlined CPUs
 * took place.
 */
static volatile int tsc_ready;
static volatile int tsc_sync_go;

/*
 * Used as indices into the tsc_sync_snaps[] array.
 */
#define	TSC_MASTER		0
#define	TSC_SLAVE		1

/*
 * Used in the tsc_sync_master()/tsc_sync_slave() rendezvous.
 */
#define	TSC_SYNC_STOP		1
#define	TSC_SYNC_GO		2
#define	TSC_SYNC_AGAIN		3

#define	TSC_CONVERT_AND_ADD(tsc, hrt, scale) {		\
	unsigned int *_l = (unsigned int *)&(tsc);	\
	(hrt) += mul32(_l[1], scale) << NSEC_SHIFT;	\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT); \
}

#define	TSC_CONVERT(tsc, hrt, scale) {			\
	unsigned int *_l = (unsigned int *)&(tsc);	\
	(hrt) = mul32(_l[1], scale) << NSEC_SHIFT;	\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT); \
}
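
/*
 * For example: tsc_hrtimeinit() computes
 *
 *	nsec_scale = (NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz
 *
 * so a (hypothetical) 2 GHz CPU gets nsec_scale = (10^9 << 27) / (2 * 10^9)
 * = 2^26, and one second's worth of ticks (2 * 10^9) converts to
 * (2 * 10^9 * 2^26) >> 27 = 10^9 ns.  The macros above perform the
 * (64-bit tsc) * (32-bit scale) >> (32 - NSEC_SHIFT) computation as two
 * mul32() operations on the high and low 32-bit halves of the TSC value.
 */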

int tsc_master_slave_sync_needed = 1;

static int	tsc_max_delta;
static hrtime_t	tsc_sync_snaps[2];
static hrtime_t	tsc_sync_delta[NCPU];
static hrtime_t	tsc_sync_tick_delta[NCPU];
static hrtime_t	tsc_last = 0;
static hrtime_t	tsc_last_jumped = 0;
static hrtime_t	tsc_hrtime_base = 0;
static int	tsc_jumped = 0;

static hrtime_t	shadow_tsc_hrtime_base;
static hrtime_t	shadow_tsc_last;
static uint_t	shadow_nsec_scale;
static uint32_t	shadow_hres_lock;
int get_tsc_ready();

hrtime_t
tsc_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;

	do {
		old_hres_lock = hres_lock;

		if ((tsc = tsc_read()) >= tsc_last) {
			/*
			 * It would seem to be obvious that this is true
			 * (that is, the past is less than the present),
			 * but it isn't true in the presence of suspend/resume
			 * cycles.  If we manage to call gethrtime()
			 * after a resume, but before the first call to
			 * tsc_tick(), we will see the jump.  In this case,
			 * we will simply use the value in TSC as the delta.
			 */
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2*tsc_max_delta) {
			/*
			 * There is a chance that tsc_tick() has just run on
			 * another CPU, and we have drifted just enough so that
			 * we appear behind tsc_last.  In this case, force the
			 * delta to be zero.
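			 * (tsc_max_delta is a small tolerance measured at
			 * boot in tsc_hrtimeinit(): roughly the number of
			 * TSC ticks one gethrtime() call takes.)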
			 */
			tsc = 0;
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}

hrtime_t
tsc_gethrtime_delta(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * We need to disable interrupts here to assure that we
		 * don't migrate between the call to tsc_read() and
		 * adding the CPU's TSC tick delta.  Note that disabling
		 * and reenabling preemption is forbidden here because
		 * we may be in the middle of a fast trap.  In the amd64
		 * kernel we cannot tolerate preemption during a fast
		 * trap.  See _update_sregs().
		 */

		flags = clear_int_flag();
		tsc = tsc_read() + tsc_sync_tick_delta[CPU->cpu_id];
		restore_int_flag(flags);

		/* See comments in tsc_gethrtime() above */

		if (tsc >= tsc_last) {
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2 * tsc_max_delta) {
			tsc = 0;
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}

/*
 * This is similar to the above, but it cannot actually spin on hres_lock.
 * As a result, it caches all of the variables it needs; if the variables
 * don't change, it's done.
 */
hrtime_t
dtrace_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * Interrupts are disabled to ensure that the thread isn't
		 * migrated between the tsc_read() and adding the CPU's
		 * TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();

		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= tsc_last)
			tsc -= tsc_last;
		else if (tsc >= tsc_last - 2*tsc_max_delta)
			tsc = 0;

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);

		if ((old_hres_lock & ~1) == hres_lock)
			break;

		/*
		 * If we're here, the clock lock is locked -- or it has been
		 * unlocked and locked since we looked.  This may be due to
		 * tsc_tick() running on another CPU -- or it may be because
		 * some code path has ended up in dtrace_probe() with
		 * CLOCK_LOCK held.  We'll try to determine that we're in
		 * the former case by taking another lap if the lock has
		 * changed since when we first looked at it.
		 */
		if (old_hres_lock != hres_lock)
			continue;

		/*
		 * So the lock was and is locked.  We'll use the old data
		 * instead.
		 */
		old_hres_lock = shadow_hres_lock;

		/*
		 * Again, disable interrupts to ensure that the thread
		 * isn't migrated between the tsc_read() and adding
		 * the CPU's TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();

		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= shadow_tsc_last)
			tsc -= shadow_tsc_last;
		else if (tsc >= shadow_tsc_last - 2 * tsc_max_delta)
			tsc = 0;

		hrt = shadow_tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, shadow_nsec_scale);
	} while ((old_hres_lock & ~1) != shadow_hres_lock);

	return (hrt);
}

hrtime_t
tsc_gethrtimeunscaled(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc;

	do {
		old_hres_lock = hres_lock;

		/*
		 * See tsc_tick().
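		 * (tsc_last_jumped accumulates the pre-jump TSC value there,
		 * keeping this unscaled result roughly monotonic across TSC
		 * resets such as suspend/resume.)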
		 */
		tsc = tsc_read() + tsc_last_jumped;
	} while ((old_hres_lock & ~1) != hres_lock);

	return (tsc);
}


/* Convert a tsc timestamp to nanoseconds */
void
tsc_scalehrtime(hrtime_t *tsc)
{
	hrtime_t hrt;
	hrtime_t mytsc;

	if (tsc == NULL)
		return;
	mytsc = *tsc;

	TSC_CONVERT(mytsc, hrt, nsec_scale);
	*tsc = hrt;
}

hrtime_t
tsc_gethrtimeunscaled_delta(void)
{
	hrtime_t hrt;
	ulong_t flags;

	/*
	 * Similarly to tsc_gethrtime_delta, we need to disable interrupts
	 * to prevent migration between the call to tsc_gethrtimeunscaled
	 * and adding the CPU's hrtime delta.  Note that disabling and
	 * reenabling preemption is forbidden here because we may be in the
	 * middle of a fast trap.  In the amd64 kernel we cannot tolerate
	 * preemption during a fast trap.  See _update_sregs().
	 */

	flags = clear_int_flag();
	hrt = tsc_gethrtimeunscaled() + tsc_sync_tick_delta[CPU->cpu_id];
	restore_int_flag(flags);

	return (hrt);
}

/*
 * Called by the master after the sync operation is complete.  If observable
 * skew between the slave and the master is found, gethrtimef will be changed
 * to point to tsc_gethrtime_delta().
 */
static void
tsc_digest(processorid_t target)
{
	hrtime_t tdelta, hdelta = 0;
	int max = tsc_max_delta;
	processorid_t source = CPU->cpu_id;
	int update;

	update = tsc_sync_delta[source] != 0 ||
	    gethrtimef == tsc_gethrtime_delta;

	/*
	 * We divide by 2 since each of the data points is the sum of two TSC
	 * reads; this takes the average of the two.
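	 * The resulting tdelta is the slave's averaged reading minus the
	 * master's; that is, the measured skew of the slave's TSC relative
	 * to the master's.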
	 */
	tdelta = (tsc_sync_snaps[TSC_SLAVE] - tsc_sync_snaps[TSC_MASTER]) / 2;
	if ((tdelta > max) || ((tdelta >= 0) && update)) {
		TSC_CONVERT_AND_ADD(tdelta, hdelta, nsec_scale);
		tsc_sync_delta[target] = tsc_sync_delta[source] - hdelta;
		tsc_sync_tick_delta[target] = tsc_sync_tick_delta[source]
		    - tdelta;
		gethrtimef = tsc_gethrtime_delta;
		gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
		return;
	}

	tdelta = -tdelta;
	if ((tdelta > max) || update) {
		TSC_CONVERT_AND_ADD(tdelta, hdelta, nsec_scale);
		tsc_sync_delta[target] = tsc_sync_delta[source] + hdelta;
		tsc_sync_tick_delta[target] = tsc_sync_tick_delta[source]
		    + tdelta;
		gethrtimef = tsc_gethrtime_delta;
		gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
	}

}

/*
 * Called by a CPU which has just performed an online operation on another
 * CPU.  It is expected that the newly onlined CPU will call tsc_sync_slave().
 */
void
tsc_sync_master(processorid_t slave)
{
	ulong_t flags;
	hrtime_t hrt;

	if (!tsc_master_slave_sync_needed)
		return;

	ASSERT(tsc_sync_go != TSC_SYNC_GO);

	flags = clear_int_flag();

	/*
	 * Wait for the slave CPU to arrive.
	 */
	while (tsc_ready != TSC_SYNC_GO)
		continue;

	/*
	 * Tell the slave CPU to begin reading its TSC; read our own.
	 */
	tsc_sync_go = TSC_SYNC_GO;
	hrt = tsc_read();

	/*
	 * Tell the slave that we're ready, and wait for the slave to tell us
	 * to read our TSC again.
	 */
	tsc_ready = TSC_SYNC_AGAIN;
	while (tsc_sync_go != TSC_SYNC_AGAIN)
		continue;

	hrt += tsc_read();
	tsc_sync_snaps[TSC_MASTER] = hrt;

	/*
	 * Wait for the slave to finish reading its TSC.
	 */
	while (tsc_ready != TSC_SYNC_STOP)
		continue;

	/*
	 * At this point, both CPUs have performed their tsc_read() calls.
	 * We'll digest it now before letting the slave CPU return.
	 */
	tsc_digest(slave);
	tsc_sync_go = TSC_SYNC_STOP;

	restore_int_flag(flags);
}

/*
 * Called by a CPU which has just been onlined.  It is expected that the CPU
 * performing the online operation will call tsc_sync_master().
 */
void
tsc_sync_slave(void)
{
	ulong_t flags;
	hrtime_t hrt;

	if (!tsc_master_slave_sync_needed)
		return;

	ASSERT(tsc_sync_go != TSC_SYNC_GO);

	flags = clear_int_flag();

	/* to test tsc_gethrtime_delta, add wrmsr(REG_TSC, 0) here */

	/*
	 * Tell the master CPU that we're ready, and wait for the master to
	 * tell us to begin reading our TSC.
	 */
	tsc_ready = TSC_SYNC_GO;
	while (tsc_sync_go != TSC_SYNC_GO)
		continue;

	hrt = tsc_read();

	/*
	 * Wait for the master CPU to be ready to read its TSC again.
	 */
	while (tsc_ready != TSC_SYNC_AGAIN)
		continue;

	/*
	 * Tell the master CPU to read its TSC again; read ours again.
	 */
	tsc_sync_go = TSC_SYNC_AGAIN;

	hrt += tsc_read();
	tsc_sync_snaps[TSC_SLAVE] = hrt;

	/*
	 * Tell the master that we're done, and wait to be dismissed.
	 */
	tsc_ready = TSC_SYNC_STOP;
	while (tsc_sync_go != TSC_SYNC_STOP)
		continue;

	restore_int_flag(flags);
}

/*
 * Called once per second on a CPU from the cyclic subsystem's
 * CY_HIGH_LEVEL interrupt.
 * (No longer restricted to cpu0.)
 */
void
tsc_tick(void)
{
	hrtime_t now, delta;
	ushort_t spl;

	/*
	 * Before we set the new variables, we set the shadow values.  This
	 * allows for lock free operation in dtrace_gethrtime().
	 */
	lock_set_spl((lock_t *)&shadow_hres_lock + HRES_LOCK_OFFSET,
	    ipltospl(CBE_HIGH_PIL), &spl);

	shadow_tsc_hrtime_base = tsc_hrtime_base;
	shadow_tsc_last = tsc_last;
	shadow_nsec_scale = nsec_scale;

	shadow_hres_lock++;
	splx(spl);

	CLOCK_LOCK(&spl);

	now = tsc_read();

	if (gethrtimef == tsc_gethrtime_delta)
		now += tsc_sync_tick_delta[CPU->cpu_id];

	if (now < tsc_last) {
		/*
		 * The TSC has just jumped into the past.  We assume that
		 * this is due to a suspend/resume cycle, and we're going
		 * to use the _current_ value of TSC as the delta.  This
		 * will keep tsc_hrtime_base correct.  We're also going to
		 * assume that the rate of the TSC does not change after a
		 * suspend/resume cycle (i.e., nsec_scale remains the same).
		 */
		delta = now;
		tsc_last_jumped += tsc_last;
		tsc_jumped = 1;
	} else {
		/*
		 * Determine the number of TSC ticks since the last clock
		 * tick, and add that to the hrtime base.
		 */
		delta = now - tsc_last;
	}

	TSC_CONVERT_AND_ADD(delta, tsc_hrtime_base, nsec_scale);
	tsc_last = now;

	CLOCK_UNLOCK(spl);
}

void
tsc_hrtimeinit(uint64_t cpu_freq_hz)
{
	extern int gethrtime_hires;
	longlong_t tsc;
	ulong_t flags;

	/*
	 * cpu_freq_hz is the measured cpu frequency in hertz
	 */

	/*
	 * We can't accommodate CPUs slower than 31.25 MHz.
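	 * (Below NANOSEC / (1 << NSEC_SHIFT) == 31.25 MHz, the computed
	 * nsec_scale would no longer fit in 32 bits; hence the ASSERT below.)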
	 */
	ASSERT(cpu_freq_hz > NANOSEC / (1 << NSEC_SHIFT));
	nsec_scale =
	    (uint_t)(((uint64_t)NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz);

	flags = clear_int_flag();
	tsc = tsc_read();
	(void) tsc_gethrtime();
	tsc_max_delta = tsc_read() - tsc;
	restore_int_flag(flags);
	gethrtimef = tsc_gethrtime;
	gethrtimeunscaledf = tsc_gethrtimeunscaled;
	scalehrtimef = tsc_scalehrtime;
	hrtime_tick = tsc_tick;
	gethrtime_hires = 1;
}

int
get_tsc_ready()
{
	return (tsc_ready);
}

/*
 * Adjust all the deltas by adding the passed value to the array.
 * Then switch to the "delta" versions of the gethrtime functions.
 * Note that 'tdelta' _could_ be a negative number, which should
 * reduce the values in the array (used, for example, if the Solaris
 * instance was moved by a virtual machine manager to a machine with a
 * higher value of tsc).
 */
void
tsc_adjust_delta(hrtime_t tdelta)
{
	int		i;
	hrtime_t	hdelta = 0;

	TSC_CONVERT(tdelta, hdelta, nsec_scale);

	for (i = 0; i < NCPU; i++) {
		tsc_sync_delta[i] += hdelta;
		tsc_sync_tick_delta[i] += tdelta;
	}

	gethrtimef = tsc_gethrtime_delta;
	gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
}

/*
 * Functions to manage TSC and high-res time on suspend and resume.
 */

/*
 * declarations needed for time adjustment
 */
extern void	rtcsync(void);
extern tod_ops_t *tod_ops;
/* There must be a better way than exposing nsec_scale! */
extern uint_t	nsec_scale;
static uint64_t tsc_saved_tsc = 0; /* 1 in 2^64 chance this'll screw up! */
static timestruc_t tsc_saved_ts;
static int	tsc_needs_resume = 0;	/* We only want to do this once. */
int		tsc_delta_onsuspend = 0;
int		tsc_adjust_seconds = 1;
int		tsc_suspend_count = 0;
int		tsc_resume_in_cyclic = 0;

/*
 * Let timestamp.c know that we are suspending.  It needs to take
 * snapshots of the current time, and do any pre-suspend work.
 */
void
tsc_suspend(void)
{
	/*
	 * What we need to do here is to record the time at which we
	 * suspended, so that we know how much to add on resume.
	 * This routine is called by each CPU, so we need to handle reentry.
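	 * Only the first caller actually adjusts the deltas (guarded by
	 * tsc_needs_resume below); later callers just re-take the TSC and
	 * TOD snapshots.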
	 */
	if (tsc_gethrtime_enable) {
		/*
		 * We put the tsc_read() inside the lock as it
		 * has no locking constraints, and it puts the
		 * acquired value closer to the time stamp (in
		 * case we delay getting the lock).
		 */
		mutex_enter(&tod_lock);
		tsc_saved_tsc = tsc_read();
		tsc_saved_ts = TODOP_GET(tod_ops);
		mutex_exit(&tod_lock);
		/* We only want to do this once. */
		if (tsc_needs_resume == 0) {
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(tsc_saved_tsc);
			} else {
				tsc_adjust_delta(nsec_scale);
			}
			tsc_suspend_count++;
		}
	}

	invalidate_cache();
	tsc_needs_resume = 1;
}

/*
 * Restore all timestamp state based on the snapshots taken at
 * suspend time.
 */
void
tsc_resume(void)
{
	/*
	 * We only need to (and want to) do this once.  So let the first
	 * caller handle this (we are locked by the cpu lock), as it
	 * is preferable that we get the earliest sync.
	 */
	if (tsc_needs_resume) {
		/*
		 * If using the TSC, adjust the delta based on how long
		 * we were sleeping (or away).  We also adjust for
		 * migration and a grown TSC.
		 */
		if (tsc_saved_tsc != 0) {
			timestruc_t	ts;
			hrtime_t	now, sleep_tsc = 0;
			int		sleep_sec;
			extern void	tsc_tick(void);
			extern uint64_t	cpu_freq_hz;

			/* tsc_read() MUST be before TODOP_GET() */
			mutex_enter(&tod_lock);
			now = tsc_read();
			ts = TODOP_GET(tod_ops);
			mutex_exit(&tod_lock);

			/* Compute seconds of sleep time */
			sleep_sec = ts.tv_sec - tsc_saved_ts.tv_sec;

			/*
			 * If the current TOD is not later than the one we
			 * saved at suspend time, then there is likely a
			 * problem with the clock.  Assume at least
			 * one second has passed, so that time goes forward.
			 */
			if (sleep_sec <= 0) {
				sleep_sec = 1;
			}

			/*
			 * How many TSC ticks should have occurred while
			 * sleeping.
			 */
			if (tsc_adjust_seconds)
				sleep_tsc = sleep_sec * cpu_freq_hz;

			/*
			 * We also want to subtract from the "sleep_tsc"
			 * the current value of tsc_read(), so that our
			 * adjustment accounts for the amount of time we
			 * have been resumed _or_ an adjustment based on
			 * the fact that we didn't actually power off the
			 * CPU (migration is another issue, but _should_
			 * also comply with this calculation).  If the CPU
			 * never powered off, then:
			 *	'now == sleep_tsc + saved_tsc'
			 * and the delta will effectively be "0".
			 */
			sleep_tsc -= now;
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(sleep_tsc);
			} else {
				tsc_adjust_delta(tsc_saved_tsc + sleep_tsc);
			}
			tsc_saved_tsc = 0;

			tsc_tick();
		}
		tsc_needs_resume = 0;
	}

}