/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information.  The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting).  In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * State transitions are accomplished by calling new_mstate() to switch between
 * states.  Transitions from a sleeping state (LMS_SLEEP and LMS_STOPPED) occur
 * by calling restore_mstate(), which restores a thread to its previously
 * running state.  This code is primarily executed by the dispatcher in disp()
 * before running a process that was put to sleep.  If the thread was not in a
 * sleeping state, this call has little effect other than to update the count
 * of time the thread has spent waiting on run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for
 * threads but tracks user, system, and idle time for cpus.  Cpu microstate
 * accounting does not track interrupt times as there is a pre-existing
 * interrupt accounting mechanism for this purpose.  Cpu microstate accounting
 * tracks time that user threads have spent active, idle, or in the system on a
 * given cpu.  Cpu microstate accounting has fewer states which allows it to
 * have better-defined transitions.  The states transition in the following
 * order:
 *
 *  CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first pass
 * through the system state, and vice-versa to reach the user state from idle.
 * The switching of the microstates from user to system is done as part of the
 * regular thread microstate accounting code, except for the idle state, which
 * is switched by the dispatcher before it runs the idle loop.
 *
 * Cpu percentages:
 * Cpu percentages are now handled by and based upon microstate accounting
 * information (the same is true for load averages).  The routines which handle
 * the growing/shrinking and exponentiation of cpu percentages have been moved
 * here as it now makes more sense for them to be generated from the microstate
 * code.  Cpu percentages are generated similarly to the way they were before;
 * however, now they are based upon high-resolution timestamps and the
 * timestamps are modified at various state changes instead of during a clock()
 * interrupt.  This allows us to generate more accurate cpu percentages which
 * are also in-sync with microstate data.
 */
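
/*
 * Illustrative call sequence (a sketch, not taken verbatim from any one
 * caller): a thread going to sleep and later being dispatched again
 * drives the transitions described above roughly as follows:
 *
 *	(void) new_mstate(curthread, LMS_SLEEP);  -- going to sleep
 *	 ... thread blocks, wakes, and is placed on a run queue ...
 *	restore_mstate(t);  -- in disp(): return to ms_prev and charge
 *			       the time since t_waitrq to LMS_WAIT_CPU
 */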

/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(
	kthread_t	*t,
	int		init_state)
{
	struct mstate *ms;
	klwp_t *lwp;
	hrtime_t curtime;

	ASSERT(init_state != LMS_WAIT_CPU);
	ASSERT((unsigned)init_state < NMSTATES);

	if ((lwp = ttolwp(t)) != NULL) {
		ms = &lwp->lwp_mstate;
		curtime = gethrtime_unscaled();
		ms->ms_prev = LMS_SYSTEM;
		ms->ms_start = curtime;
		ms->ms_term = 0;
		ms->ms_state_start = curtime;
		t->t_mstate = init_state;
		t->t_waitrq = 0;
		t->t_hrtime = curtime;
		if ((t->t_proc_flag & TP_MSACCT) == 0)
			t->t_proc_flag |= TP_MSACCT;
		bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
	}
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu
 */

void
init_cpu_mstate(
	cpu_t *cpu,
	int init_state)
{
	ASSERT(init_state != CMS_DISABLED);

	cpu->cpu_mstate = init_state;
	cpu->cpu_mstate_start = gethrtime_unscaled();
	cpu->cpu_waitrq = 0;
	bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * Sets the cpu state to CMS_DISABLED.  We don't actually track this time,
 * but it serves as a useful placeholder state for when we're not
 * doing anything.
 */

void
term_cpu_mstate(struct cpu *cpu)
{
	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	cpu->cpu_mstate = CMS_DISABLED;
	cpu->cpu_mstate_start = 0;
}

/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define	NEW_CPU_MSTATE(state)						\
	gen = cpu->cpu_mstate_gen;					\
	cpu->cpu_mstate_gen = 0;					\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
	cpu->cpu_mstate = state;					\
	cpu->cpu_mstate_start = curtime;				\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;

void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
	cpu_t *cpu = CPU;
	uint16_t gen;

	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	ASSERT(cmstate < NCMSTATES);
	ASSERT(cmstate != CMS_DISABLED);

	/*
	 * This function must not be re-entered on a given CPU. As such,
	 * we ASSERT and panic if we are called on behalf of an interrupt.
	 * The one exception is for an interrupt which has previously
	 * blocked. Such an interrupt is being scheduled by the dispatcher
	 * just like a normal thread, and as such cannot arrive here
	 * in a re-entrant manner.
	 */

	ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
	ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

	/*
	 * LOCKING, or lack thereof:
	 *
	 * Updates to CPU mstate can only be made by the CPU
	 * itself, and the above check to ignore interrupts
	 * should prevent recursion into this function on a given
	 * processor. i.e. no possible write contention.
	 *
	 * However, reads of CPU mstate can occur at any time
	 * from any CPU. Any locking added to this code path
	 * would seriously impact syscall performance. So,
	 * instead we have a best-effort protection for readers.
	 * The reader will want to account for any time between
	 * cpu_mstate_start and the present time. This requires
	 * some guarantees that the reader is getting coherent
	 * information.
	 *
	 * We use a generation counter, which is set to 0 before
	 * we start making changes, and is set to a new value
	 * after we're done. Someone reading the CPU mstate
	 * should check for the same non-zero value of this
	 * counter both before and after reading all state. The
	 * important point is that the reader is not a
	 * performance-critical path, but this function is.
	 *
	 * The ordering of writes is critical. cpu_mstate_gen must
	 * be visibly zero on all CPUs before we change cpu_mstate
	 * and cpu_mstate_start. Additionally, cpu_mstate_gen must
	 * not be restored to oldgen+1 until after all of the other
	 * writes have become visible.
	 *
	 * Normally one would use membar_producer() calls to accomplish
	 * this. Unfortunately this routine is extremely performance
	 * critical (esp. in syscall_mstate below) and we cannot
	 * afford the additional time, particularly on some x86
	 * architectures with extremely slow sfence calls. On a
	 * CPU which guarantees write ordering (including sparc, x86,
	 * and amd64) this is not a problem. The compiler could still
	 * reorder the writes, so we make the four cpu fields
	 * volatile to prevent this.
	 *
	 * TSO warning: should we port to a non-TSO (or equivalent)
	 * CPU, this will break.
	 *
	 * The reader still needs the membar_consumer() calls because,
	 * although the volatiles prevent the compiler from reordering
	 * loads, the CPU can still do so.
	 */

	NEW_CPU_MSTATE(cmstate);
}
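
/*
 * A minimal reader-side sketch (an assumption; the actual consumers of
 * this state live elsewhere): a reader must observe the same non-zero
 * generation both before and after copying the state, retrying until
 * it does:
 *
 *	uint16_t gen;
 *	hrtime_t start, acct;
 *	int state;
 *
 *	do {
 *		gen = cpu->cpu_mstate_gen;
 *		membar_consumer();
 *		state = cpu->cpu_mstate;
 *		start = cpu->cpu_mstate_start;
 *		acct = cpu->cpu_acct[state];
 *		membar_consumer();
 *	} while (gen == 0 || gen != cpu->cpu_mstate_gen);
 */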

/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t waitrq;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int	mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
		/* if waitrq is zero, count all of the time. */
		if (waitrq == 0) {
			waitrq = now;
		}

		if (waitrq > state_start) {
			aggr_time += waitrq - state_start;
		}
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}

/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racy interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * they should be.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
	struct mstate	*const	ms = &ttolwp(t)->lwp_mstate;

	int		mstate;
	hrtime_t	now;
	hrtime_t	state_start;
	hrtime_t	waitrq;
	hrtime_t	aggr_onp;
	hrtime_t	aggr_run;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_procp->p_flag & SSYS);
	ASSERT(ttolwp(t) != NULL);

	/* shouldn't be any non-SYSTEM on-CPU time */
	ASSERT(ms->ms_acct[LMS_USER] == 0);
	ASSERT(ms->ms_acct[LMS_TRAP] == 0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	state_start = ms->ms_state_start;

	aggr_onp = ms->ms_acct[LMS_SYSTEM];
	aggr_run = ms->ms_acct[LMS_WAIT_CPU];

	now = gethrtime_unscaled();

	/* if waitrq == 0, then there is no time to account to TS_RUN */
	if (waitrq == 0)
		waitrq = now;

	/* If there is system time to accumulate, do so */
	if (mstate == LMS_SYSTEM && state_start < waitrq)
		aggr_onp += waitrq - state_start;

	if (waitrq < now)
		aggr_run += now - waitrq;

	scalehrtime(&aggr_onp);
	scalehrtime(&aggr_run);

	*onproc = aggr_onp;
	*runnable = aggr_run;
}
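
/*
 * A hedged caller-side sketch (an assumption, not taken from the actual
 * consumers): since onproc and runnable may move backwards, a caller
 * that needs monotonic output can clamp each sample against the last:
 *
 *	mstate_systhread_times(t, &onp, &run);
 *	prev_onp = onp = MAX(onp, prev_onp);
 *	prev_run = run = MAX(run, prev_run);
 */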

/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  Note that p_acct is already scaled, while ms_acct is not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}


void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	klwp_t *lwp;
	hrtime_t newtime;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	t->t_mstate = toms;
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}

#undef NEW_CPU_MSTATE
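
/*
 * Typical usage sketch (illustrative, not lifted from the actual entry
 * points): the syscall entry and return paths bracket kernel work with
 * a pair of calls, which also keeps the cpu microstate switched between
 * CMS_USER and CMS_SYSTEM as described in the theory comment above:
 *
 *	syscall_mstate(LMS_USER, LMS_SYSTEM);	-- entering the kernel
 *	 ... perform the system call ...
 *	syscall_mstate(LMS_SYSTEM, LMS_USER);	-- returning to userland
 */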

/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp.  The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define	LSHIFT	31
#define	LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */
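
/*
 * Example of the representation (value = x / 2^LSHIFT):
 *	LSI_ONE      = 0x80000000	represents 1.0
 *	LSI_ONE >> 1 = 0x40000000	represents 0.5
 *	LSI_ONE >> 2 = 0x20000000	represents 0.25
 */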

#ifdef DEBUG
uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */
#endif

static uint64_t
exp_x(uint64_t x)
{
	int i;
	uint64_t ull;
	uint32_t ui;

#ifdef DEBUG
	expx_cnt++;
#endif
	/*
	 * By the formula:
	 *	exp(-x) = exp(-x/2) * exp(-x/2)
	 * we keep halving x until it becomes small enough for
	 * the following approximation to be accurate enough:
	 *	exp(-x) = 1 - x
	 * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
	 * Our final error will be smaller than 4%.
	 */

	/*
	 * Use a uint64_t for the initial shift calculation.
	 */
	ull = x >> (LSHIFT-2);

	/*
	 * Short circuit:
	 * A number this large produces effectively 0 (actually .005).
	 * This way, we will never do more than 5 multiplies.
	 */
	if (ull >= (1 << 5))
		return (0);

	ui = ull;	/* OK.  Now we can use a uint_t. */
	for (i = 0; ui != 0; i++)
		ui >>= 1;

	if (i != 0) {
#ifdef DEBUG
		expx_mul += i;	/* seldom happens */
#endif
		x >>= i;
	}

	/*
	 * Now we compute 1 - x and square it the number of times
	 * that we halved x above to produce the final result:
	 */
	x = LSI_ONE - x;
	while (i--)
		x = (x * x) >> LSHIFT;

	return (x);
}
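
/*
 * Worked trace (a hand evaluation of the code above, for illustration):
 * exp_x(LSI_ONE), i.e. x = 1.0:
 *	ull = x >> (LSHIFT - 2) = 4, so we do not short-circuit;
 *	i = 3, so x is halved three times down to 0.125;
 *	1 - x = 0.875 is then squared three times:
 *	0.875 -> 0.765625 -> ~0.5862 -> ~0.3436,
 * versus the true value exp(-1) = ~0.3679.
 */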

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu:  pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
	uint64_t delta = (uint64_t)nsec;

	delta /= cpu_decay_factor;
	return ((pct * exp_x(delta)) >> LSHIFT);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu:  1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
	return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}
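
/*
 * Numeric sketch: for a delta where exp_x() yields 0.75, a thread at
 * 40% decays to 0.40 * 0.75 = 30%, while a thread accumulating cpu at
 * 40% grows to 1 - (1 - 0.40) * 0.75 = 55%.
 */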

/*
 * Defined to determine whether an lwp is still on a processor.
 */

#define	T_ONPROC(kt)	\
	((kt)->t_mstate < LMS_SLEEP)
#define	T_OFFPROC(kt)	\
	((kt)->t_mstate >= LMS_SLEEP)

uint_t
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
	hrtime_t delta;
	hrtime_t hrlb;
	uint_t pctcpu;
	uint_t npctcpu;

	/*
	 * This routine can get called at PIL > 0, so the update *has* to
	 * be done atomically.  Holding locks here causes bad things to
	 * happen (read: deadlock).
	 */

	do {
		if (T_ONPROC(t) && t->t_waitrq == 0) {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_grow(pctcpu, delta);
		} else {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_decay(pctcpu, delta);
		}
	} while (cas32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);

	return (npctcpu);
}

/*
 * Change the microstate level for the LWP and update the
 * associated accounting information.  Return the previous
 * LWP state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
	struct mstate *ms;
	unsigned state;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	hrtime_t oldtime;
	klwp_t *lwp;

	ASSERT(new_state != LMS_WAIT_CPU);
	ASSERT((unsigned)new_state < NMSTATES);
	ASSERT(t == curthread || THREAD_LOCK_HELD(t));

	/*
	 * Don't do microstate processing for threads without an lwp (kernel
	 * threads).  Also, if we're an interrupt thread that is pinning
	 * another thread, our t_mstate hasn't been initialized.  We'd be
	 * modifying the microstate of the underlying lwp, which doesn't
	 * realize that it's pinned.  In this case, also don't change the
	 * microstate.
	 */
	if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
		return (LMS_SYSTEM);

	curtime = gethrtime_unscaled();

	/* adjust cpu percentages before we go any further */
	(void) cpu_update_pct(t, curtime);

	ms = &lwp->lwp_mstate;
	state = t->t_mstate;
	do {
		switch (state) {
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[state];
			break;
		}
		newtime = curtime - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
		t->t_mstate = new_state;
		ms->ms_state_start = curtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
	/*
	 * Remember the previous running microstate.
	 */
	if (state != LMS_SLEEP && state != LMS_STOPPED)
		ms->ms_prev = state;

	/*
	 * Switch CPU microstate if appropriate
	 */

	kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
	ASSERT(t->t_cpu == CPU);
	if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
		if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
			new_cpu_mstate(CMS_USER, curtime);
		else if (new_state != LMS_USER &&
		    t->t_cpu->cpu_mstate != CMS_SYSTEM)
			new_cpu_mstate(CMS_SYSTEM, curtime);
	}
	kpreempt_enable();

	return (ms->ms_prev);
}

/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
	struct mstate *ms;
	hrtime_t *mstimep;
	klwp_t *lwp;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t newtime;
	hrtime_t oldtime;

	/*
	 * Don't restore the mstate of threads without lwps (kernel threads).
	 *
	 * Threads with t_intr set shouldn't be in the dispatcher, so assert
	 * that no thread here has t_intr set.
	 */
	ASSERT(t->t_intr == NULL);

	if ((lwp = ttolwp(t)) == NULL)
		return;

	curtime = gethrtime_unscaled();
	(void) cpu_update_pct(t, curtime);
	ms = &lwp->lwp_mstate;
	ASSERT((unsigned)t->t_mstate < NMSTATES);
	do {
		switch (t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			ASSERT((unsigned)ms->ms_prev < NMSTATES);
			switch (ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				mstimep = &ms->ms_acct[ms->ms_prev];
				break;
			default:
				mstimep = &ms->ms_acct[LMS_SLEEP];
				break;
			}
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_STOPPED:
			mstimep = &ms->ms_acct[LMS_STOPPED];
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[t->t_mstate];
			break;
		}
		waitrq = t->t_waitrq;	/* hopefully atomic */
		if (waitrq == 0) {
			waitrq = curtime;
		}
		t->t_waitrq = 0;
		newtime = waitrq - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
	/*
	 * Update the WAIT_CPU timer and per-cpu waitrq total.
	 */
	ms->ms_acct[LMS_WAIT_CPU] += (curtime - waitrq);
	CPU->cpu_waitrq += (curtime - waitrq);
	ms->ms_state_start = curtime;
}
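
/*
 * Timeline sketch of the accounting performed above:
 *
 *	ms_state_start         t_waitrq              curtime
 *	      |---- sleep/stop ----|---- run queue ----|
 *	        charged to *mstimep  charged to LMS_WAIT_CPU
 *	                             (and to CPU->cpu_waitrq)
 */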

/*
 * Copy lwp microstate accounting and resource usage information
 * to the process.  (The lwp is terminating.)
 */
void
term_mstate(kthread_t *t)
{
	struct mstate *ms;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int i;
	hrtime_t tmp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	ms = &lwp->lwp_mstate;
	(void) new_mstate(t, LMS_STOPPED);
	ms->ms_term = ms->ms_state_start;
	tmp = ms->ms_term - ms->ms_start;
	scalehrtime(&tmp);
	p->p_mlreal += tmp;
	for (i = 0; i < NMSTATES; i++) {
		tmp = ms->ms_acct[i];
		scalehrtime(&tmp);
		p->p_acct[i] += tmp;
	}
	p->p_ru.minflt   += lwp->lwp_ru.minflt;
	p->p_ru.majflt   += lwp->lwp_ru.majflt;
	p->p_ru.nswap    += lwp->lwp_ru.nswap;
	p->p_ru.inblock  += lwp->lwp_ru.inblock;
	p->p_ru.oublock  += lwp->lwp_ru.oublock;
	p->p_ru.msgsnd   += lwp->lwp_ru.msgsnd;
	p->p_ru.msgrcv   += lwp->lwp_ru.msgrcv;
	p->p_ru.nsignals += lwp->lwp_ru.nsignals;
	p->p_ru.nvcsw    += lwp->lwp_ru.nvcsw;
	p->p_ru.nivcsw   += lwp->lwp_ru.nivcsw;
	p->p_ru.sysc	 += lwp->lwp_ru.sysc;
	p->p_ru.ioch	 += lwp->lwp_ru.ioch;
	p->p_defunct++;
}
807