/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2012 Joyent, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/zone.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information.  The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting).  In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * State transitions are accomplished by calling new_mstate() to switch between
 * states.  Transitions from a sleeping state (LMS_SLEEP and LMS_STOPPED) occur
 * by calling restore_mstate(), which restores a thread to its previous running
 * state.  This code is primarily executed by the dispatcher in disp() before
 * running a thread that was put to sleep.  If the thread was not in a sleeping
 * state, this call has little effect other than to update the count of time
 * the thread has spent waiting on run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for
 * threads, but it tracks user, system, and idle time for cpus.  Cpu microstate
 * accounting does not track interrupt times, as there is a pre-existing
 * interrupt accounting mechanism for this purpose.  Cpu microstate accounting
 * tracks time that user threads have spent active, idle, or in the system on a
 * given cpu.  Cpu microstate accounting has fewer states, which allows it to
 * have better defined transitions.  The states transition in the following
 * order:
 *
 *  CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first pass
 * through the system state, and likewise to get from idle back to user.  The
 * switching of the microstates from user to system is done as part of the
 * regular thread microstate accounting code, except for the idle state, which
 * is switched by the dispatcher before it runs the idle loop.
 *
 * Cpu percentages:
 * Cpu percentages are now computed from microstate accounting information
 * (the same is true for load averages).  The routines which handle the
 * growing/shrinking and exponentiation of cpu percentages have been moved
 * here, as it now makes more sense for them to be generated from the
 * microstate code.  Cpu percentages are generated similarly to the way they
 * were before; however, they are now based upon high-resolution timestamps,
 * and the timestamps are modified at various state changes instead of during
 * a clock() interrupt.  This allows us to generate more accurate cpu
 * percentages which are also in sync with the microstate data.
 */
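
/*
 * For example (illustrative only): a thread entering a system call moves
 * from LMS_USER to LMS_SYSTEM via syscall_mstate(); if it then blocks, it
 * moves to LMS_SLEEP via new_mstate(), and when the dispatcher later
 * selects it to run, restore_mstate() returns it to its previous running
 * state and charges the interim to LMS_WAIT_CPU.
 */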

/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(
	kthread_t	*t,
	int		init_state)
{
	struct mstate *ms;
	klwp_t *lwp;
	hrtime_t curtime;

	ASSERT(init_state != LMS_WAIT_CPU);
	ASSERT((unsigned)init_state < NMSTATES);

	if ((lwp = ttolwp(t)) != NULL) {
		ms = &lwp->lwp_mstate;
		curtime = gethrtime_unscaled();
		ms->ms_prev = LMS_SYSTEM;
		ms->ms_start = curtime;
		ms->ms_term = 0;
		ms->ms_state_start = curtime;
		t->t_mstate = init_state;
		t->t_waitrq = 0;
		t->t_hrtime = curtime;
		if ((t->t_proc_flag & TP_MSACCT) == 0)
			t->t_proc_flag |= TP_MSACCT;
		bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
	}
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu
 */

void
init_cpu_mstate(
	cpu_t *cpu,
	int init_state)
{
	ASSERT(init_state != CMS_DISABLED);

	cpu->cpu_mstate = init_state;
	cpu->cpu_mstate_start = gethrtime_unscaled();
	cpu->cpu_waitrq = 0;
	bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * Sets the cpu microstate to CMS_DISABLED.  We don't actually track this
 * time, but it serves as a useful placeholder state for when we're not
 * doing anything.
 */

void
term_cpu_mstate(struct cpu *cpu)
{
	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	cpu->cpu_mstate = CMS_DISABLED;
	cpu->cpu_mstate_start = 0;
}

/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define	NEW_CPU_MSTATE(state)						\
	gen = cpu->cpu_mstate_gen;					\
	cpu->cpu_mstate_gen = 0;					\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
	cpu->cpu_mstate = state;					\
	cpu->cpu_mstate_start = curtime;				\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;

void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
	cpu_t *cpu = CPU;
	uint16_t gen;

	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	ASSERT(cmstate < NCMSTATES);
	ASSERT(cmstate != CMS_DISABLED);

	/*
	 * This function cannot be re-entrant on a given CPU. As such,
	 * we ASSERT and panic if we are called on behalf of an interrupt.
	 * The one exception is for an interrupt which has previously
	 * blocked. Such an interrupt is being scheduled by the dispatcher
	 * just like a normal thread, and as such cannot arrive here
	 * in a re-entrant manner.
	 */

	ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
	ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

	/*
	 * LOCKING, or lack thereof:
	 *
	 * Updates to CPU mstate can only be made by the CPU
	 * itself, and the above check to ignore interrupts
	 * should prevent recursion into this function on a given
	 * processor, i.e., no possible write contention.
	 *
	 * However, reads of CPU mstate can occur at any time
	 * from any CPU. Any locking added to this code path
	 * would seriously impact syscall performance. So,
	 * instead we have a best-effort protection for readers.
	 * The reader will want to account for any time between
	 * cpu_mstate_start and the present time. This requires
	 * some guarantees that the reader is getting coherent
	 * information.
	 *
	 * We use a generation counter, which is set to 0 before
	 * we start making changes, and is set to a new value
	 * after we're done. Someone reading the CPU mstate
	 * should check for the same non-zero value of this
	 * counter both before and after reading all state. The
	 * important point is that the reader is not a
	 * performance-critical path, but this function is.
	 *
	 * The ordering of writes is critical. cpu_mstate_gen must
	 * be visibly zero on all CPUs before we change cpu_mstate
	 * and cpu_mstate_start. Additionally, cpu_mstate_gen must
	 * not be restored to oldgen+1 until after all of the other
	 * writes have become visible.
	 *
	 * Normally one would insert membar_producer() calls to
	 * accomplish this. Unfortunately this routine is extremely
	 * performance critical (esp. in syscall_mstate below) and we
	 * cannot afford the additional time, particularly on some x86
	 * architectures with extremely slow sfence calls. On a
	 * CPU which guarantees write ordering (including sparc, x86,
	 * and amd64) this is not a problem. The compiler could still
	 * reorder the writes, so we make the four cpu fields
	 * volatile to prevent this.
	 *
	 * TSO warning: should we port to a non-TSO (or equivalent)
	 * CPU, this will break.
	 *
	 * The reader still needs the membar_consumer() calls because,
	 * although the volatiles prevent the compiler from reordering
	 * loads, the CPU can still do so.
	 */
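
	/*
	 * A minimal sketch of the reader side (illustrative only; the
	 * actual consumers live elsewhere in the kernel):
	 *
	 *	do {
	 *		gen = cpu->cpu_mstate_gen;
	 *		membar_consumer();
	 *		state = cpu->cpu_mstate;
	 *		start = cpu->cpu_mstate_start;
	 *		membar_consumer();
	 *	} while (gen == 0 || gen != cpu->cpu_mstate_gen);
	 */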

	NEW_CPU_MSTATE(cmstate);
}

/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t waitrq;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int	mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
		/* if waitrq is zero, count all of the time. */
		if (waitrq == 0) {
			waitrq = now;
		}

		if (waitrq > state_start) {
			aggr_time += waitrq - state_start;
		}
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}

/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racy interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * they should be.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
	struct mstate	*const	ms = &ttolwp(t)->lwp_mstate;

	int		mstate;
	hrtime_t	now;
	hrtime_t	state_start;
	hrtime_t	waitrq;
	hrtime_t	aggr_onp;
	hrtime_t	aggr_run;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_procp->p_flag & SSYS);
	ASSERT(ttolwp(t) != NULL);

	/* shouldn't be any non-SYSTEM on-CPU time */
	ASSERT(ms->ms_acct[LMS_USER] == 0);
	ASSERT(ms->ms_acct[LMS_TRAP] == 0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	state_start = ms->ms_state_start;

	aggr_onp = ms->ms_acct[LMS_SYSTEM];
	aggr_run = ms->ms_acct[LMS_WAIT_CPU];

	now = gethrtime_unscaled();

	/* if waitrq == 0, then there is no time to account to TS_RUN */
	if (waitrq == 0)
		waitrq = now;

	/* If there is system time to accumulate, do so */
	if (mstate == LMS_SYSTEM && state_start < waitrq)
		aggr_onp += waitrq - state_start;

	if (waitrq < now)
		aggr_run += now - waitrq;

	scalehrtime(&aggr_onp);
	scalehrtime(&aggr_run);

	*onproc = aggr_onp;
	*runnable = aggr_run;
}

/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  Note that p_acct is already scaled, while ms_acct is not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}

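/*
 * Update microstate accounting as a thread crosses the user/system
 * boundary (syscall entry and exit), keeping the cpu microstate and the
 * per-zone user/system time aggregates in step.
 */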
void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	zone_t *z = ttozone(t);
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	klwp_t *lwp;
	hrtime_t newtime;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
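	/*
	 * As noted in mstate_thread_onproc_time(), unscaled hrtime can
	 * appear to go backwards when read on different CPUs; retry until
	 * the delta is non-negative.
	 */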
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	if (fromms == LMS_USER)
		atomic_add_64(&z->zone_utime, newtime);
	else if (fromms == LMS_SYSTEM)
		atomic_add_64(&z->zone_stime, newtime);
	t->t_mstate = toms;
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}

#undef NEW_CPU_MSTATE

/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp.  The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define	LSHIFT	31
#define	LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */

#ifdef DEBUG
uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */
#endif

static uint64_t
exp_x(uint64_t x)
{
	int i;
	uint64_t ull;
	uint32_t ui;

#ifdef DEBUG
	expx_cnt++;
#endif
	/*
	 * By the formula:
	 *	exp(-x) = exp(-x/2) * exp(-x/2)
	 * we keep halving x until it becomes small enough for
	 * the following approximation to be accurate enough:
	 *	exp(-x) = 1 - x
	 * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
	 * Our final error will be smaller than 4%.
	 */

	/*
	 * Use a uint64_t for the initial shift calculation.
	 */
	ull = x >> (LSHIFT-2);

	/*
	 * Short circuit:
	 * A number this large produces effectively 0 (actually .005).
	 * This way, we will never do more than 5 multiplies.
	 */
	if (ull >= (1 << 5))
		return (0);

	ui = ull;	/* OK.  Now we can use a uint_t. */
	for (i = 0; ui != 0; i++)
		ui >>= 1;

	if (i != 0) {
#ifdef DEBUG
		expx_mul += i;	/* seldom happens */
#endif
		x >>= i;
	}

	/*
	 * Now we compute 1 - x and square it the number of times
	 * that we halved x above to produce the final result:
	 */
	x = LSI_ONE - x;
	while (i--)
		x = (x * x) >> LSHIFT;

	return (x);
}
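
/*
 * Worked example (illustrative): with LSHIFT == 31, x == LSI_ONE / 2
 * represents 0.5.  Then x >> (LSHIFT - 2) == 2, so i == 2 and x is halved
 * twice to represent 0.125.  exp_x() then computes (1 - 0.125) and squares
 * it twice: 0.875^4 ~= 0.586, versus a true exp(-0.5) ~= 0.607, which is
 * within the stated 4% error bound.
 */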

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu:  pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
	uint64_t delta = (uint64_t)nsec;

	delta /= cpu_decay_factor;
	return ((pct * exp_x(delta)) >> LSHIFT);
}
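
/*
 * For example (illustrative, assuming cpu_decay_factor == 10): a five
 * second delta gives tau = 5e9 / 10 = 5e8 in scaled units, i.e. about
 * 0.233, and exp(-0.233) ~= 0.79, so pct decays by roughly 20%.
 */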

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu:  1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
	return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}


/*
 * Defined to determine whether an lwp is still on a processor.
 */

#define	T_ONPROC(kt)	\
	((kt)->t_mstate < LMS_SLEEP)
#define	T_OFFPROC(kt)	\
	((kt)->t_mstate >= LMS_SLEEP)

uint_t
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
	hrtime_t delta;
	hrtime_t hrlb;
	uint_t pctcpu;
	uint_t npctcpu;

	/*
	 * This routine can get called at PIL > 0, so this *has* to be
	 * done atomically.  Holding locks here causes bad things to
	 * happen (read: deadlock).
	 */

	do {
		pctcpu = t->t_pctcpu;
		hrlb = t->t_hrtime;
		delta = newtime - hrlb;
		if (delta < 0) {
			newtime = gethrtime_unscaled();
			delta = newtime - hrlb;
		}
		t->t_hrtime = newtime;
		scalehrtime(&delta);
		if (T_ONPROC(t) && t->t_waitrq == 0) {
			npctcpu = cpu_grow(pctcpu, delta);
		} else {
			npctcpu = cpu_decay(pctcpu, delta);
		}
	} while (atomic_cas_32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);

	return (npctcpu);
}

/*
 * Change the microstate level for the LWP and update the
 * associated accounting information.  Return the previous
 * LWP state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
	struct mstate *ms;
	unsigned state;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	hrtime_t oldtime;
	hrtime_t ztime;
	hrtime_t origstart;
	klwp_t *lwp;
	zone_t *z;

	ASSERT(new_state != LMS_WAIT_CPU);
	ASSERT((unsigned)new_state < NMSTATES);
	ASSERT(t == curthread || THREAD_LOCK_HELD(t));

	/*
	 * Don't do microstate processing for threads without an lwp (kernel
	 * threads).  Also, if we're an interrupt thread that is pinning
	 * another thread, our t_mstate hasn't been initialized.  We'd be
	 * modifying the microstate of the underlying lwp, which doesn't
	 * realize that it's pinned.  In this case, also don't change the
	 * microstate.
	 */
	if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
		return (LMS_SYSTEM);

	curtime = gethrtime_unscaled();

	/* adjust cpu percentages before we go any further */
	(void) cpu_update_pct(t, curtime);

	ms = &lwp->lwp_mstate;
	state = t->t_mstate;
	origstart = ms->ms_state_start;
	do {
		switch (state) {
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[state];
			break;
		}
		ztime = newtime = curtime - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
		t->t_mstate = new_state;
		ms->ms_state_start = curtime;
	} while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
	    oldtime);

	/*
	 * When the system boots, the initial startup thread will have a
	 * ms_state_start of 0, which would add a huge system time to the
	 * global zone.  We want to skip aggregating that initial bit of work.
	 */
	if (origstart != 0) {
		z = ttozone(t);
		if (state == LMS_USER)
			atomic_add_64(&z->zone_utime, ztime);
		else if (state == LMS_SYSTEM)
			atomic_add_64(&z->zone_stime, ztime);
	}

	/*
	 * Remember the previous running microstate.
	 */
	if (state != LMS_SLEEP && state != LMS_STOPPED)
		ms->ms_prev = state;

	/*
	 * Switch CPU microstate if appropriate
	 */

	kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
	ASSERT(t->t_cpu == CPU);
	if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
		if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
			new_cpu_mstate(CMS_USER, curtime);
		else if (new_state != LMS_USER &&
		    t->t_cpu->cpu_mstate != CMS_SYSTEM)
			new_cpu_mstate(CMS_SYSTEM, curtime);
	}
	kpreempt_enable();

	return (ms->ms_prev);
}

/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
	struct mstate *ms;
	hrtime_t *mstimep;
	klwp_t *lwp;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t newtime;
	hrtime_t oldtime;
	hrtime_t waittime;
	zone_t *z;

	/*
	 * Don't restore the mstate of threads without lwps (kernel threads).
	 *
	 * Threads with t_intr set shouldn't be in the dispatcher, so assert
	 * that nobody here has t_intr set.
	 */
	ASSERT(t->t_intr == NULL);

	if ((lwp = ttolwp(t)) == NULL)
		return;

	curtime = gethrtime_unscaled();
	(void) cpu_update_pct(t, curtime);
	ms = &lwp->lwp_mstate;
	ASSERT((unsigned)t->t_mstate < NMSTATES);
	do {
		switch (t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			ASSERT((unsigned)ms->ms_prev < NMSTATES);
			switch (ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				mstimep = &ms->ms_acct[ms->ms_prev];
				break;
			default:
				mstimep = &ms->ms_acct[LMS_SLEEP];
				break;
			}
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_STOPPED:
			mstimep = &ms->ms_acct[LMS_STOPPED];
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[t->t_mstate];
			break;
		}
		waitrq = t->t_waitrq;	/* hopefully atomic */
		if (waitrq == 0) {
			waitrq = curtime;
		}
		t->t_waitrq = 0;
		newtime = waitrq - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
	} while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
	    oldtime);

	/*
	 * Update the WAIT_CPU timer and per-cpu waitrq total.
	 */
	z = ttozone(t);
	waittime = curtime - waitrq;
	ms->ms_acct[LMS_WAIT_CPU] += waittime;
	atomic_add_64(&z->zone_wtime, waittime);
	CPU->cpu_waitrq += waittime;
	ms->ms_state_start = curtime;
}

/*
 * Copy lwp microstate accounting and resource usage information
 * to the process.  (lwp is terminating)
 */
void
term_mstate(kthread_t *t)
{
	struct mstate *ms;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int i;
	hrtime_t tmp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	ms = &lwp->lwp_mstate;
	(void) new_mstate(t, LMS_STOPPED);
	ms->ms_term = ms->ms_state_start;
	tmp = ms->ms_term - ms->ms_start;
	scalehrtime(&tmp);
	p->p_mlreal += tmp;
	for (i = 0; i < NMSTATES; i++) {
		tmp = ms->ms_acct[i];
		scalehrtime(&tmp);
		p->p_acct[i] += tmp;
	}
	p->p_ru.minflt   += lwp->lwp_ru.minflt;
	p->p_ru.majflt   += lwp->lwp_ru.majflt;
	p->p_ru.nswap    += lwp->lwp_ru.nswap;
	p->p_ru.inblock  += lwp->lwp_ru.inblock;
	p->p_ru.oublock  += lwp->lwp_ru.oublock;
	p->p_ru.msgsnd   += lwp->lwp_ru.msgsnd;
	p->p_ru.msgrcv   += lwp->lwp_ru.msgrcv;
	p->p_ru.nsignals += lwp->lwp_ru.nsignals;
	p->p_ru.nvcsw    += lwp->lwp_ru.nvcsw;
	p->p_ru.nivcsw   += lwp->lwp_ru.nivcsw;
	p->p_ru.sysc	 += lwp->lwp_ru.sysc;
	p->p_ru.ioch	 += lwp->lwp_ru.ioch;
	p->p_defunct++;
}