xref: /illumos-gate/usr/src/uts/common/os/lwp.c (revision 088d69f8)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
59acbbeafSnn  * Common Development and Distribution License (the "License").
69acbbeafSnn  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
227c478bd9Sstevel@tonic-gate /*
23061d7437SJakub Jermar  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate  */
27f971a346SBryan Cantrill /*
28f971a346SBryan Cantrill  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29f971a346SBryan Cantrill  */
30f971a346SBryan Cantrill 
317c478bd9Sstevel@tonic-gate #include <sys/param.h>
327c478bd9Sstevel@tonic-gate #include <sys/types.h>
337c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
347c478bd9Sstevel@tonic-gate #include <sys/systm.h>
357c478bd9Sstevel@tonic-gate #include <sys/thread.h>
367c478bd9Sstevel@tonic-gate #include <sys/proc.h>
377c478bd9Sstevel@tonic-gate #include <sys/task.h>
387c478bd9Sstevel@tonic-gate #include <sys/project.h>
397c478bd9Sstevel@tonic-gate #include <sys/signal.h>
407c478bd9Sstevel@tonic-gate #include <sys/errno.h>
417c478bd9Sstevel@tonic-gate #include <sys/vmparam.h>
427c478bd9Sstevel@tonic-gate #include <sys/stack.h>
437c478bd9Sstevel@tonic-gate #include <sys/procfs.h>
447c478bd9Sstevel@tonic-gate #include <sys/prsystm.h>
457c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
467c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
477c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
487c478bd9Sstevel@tonic-gate #include <sys/door.h>
497c478bd9Sstevel@tonic-gate #include <vm/seg_kp.h>
507c478bd9Sstevel@tonic-gate #include <sys/debug.h>
517c478bd9Sstevel@tonic-gate #include <sys/tnf.h>
527c478bd9Sstevel@tonic-gate #include <sys/schedctl.h>
537c478bd9Sstevel@tonic-gate #include <sys/poll.h>
547c478bd9Sstevel@tonic-gate #include <sys/copyops.h>
557c478bd9Sstevel@tonic-gate #include <sys/lwp_upimutex_impl.h>
567c478bd9Sstevel@tonic-gate #include <sys/cpupart.h>
577c478bd9Sstevel@tonic-gate #include <sys/lgrp.h>
587c478bd9Sstevel@tonic-gate #include <sys/rctl.h>
597c478bd9Sstevel@tonic-gate #include <sys/contract_impl.h>
607c478bd9Sstevel@tonic-gate #include <sys/cpc_impl.h>
617c478bd9Sstevel@tonic-gate #include <sys/sdt.h>
627c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
639acbbeafSnn #include <sys/brand.h>
64e0cf54a5SRoger A. Faulkner #include <sys/cyclic.h>
65936e3a33SGangadhar Mylapuram #include <sys/pool.h>
676eb30ec3SRoger A. Faulkner /* hash function for the lwpid hash table, p->p_tidhash[] */
686eb30ec3SRoger A. Faulkner #define	TIDHASH(tid, hash_sz)	((tid) & ((hash_sz) - 1))
696eb30ec3SRoger A. Faulkner 
707c478bd9Sstevel@tonic-gate void *segkp_lwp;		/* cookie for pool of segkp resources */
71575a7426Spt extern void reapq_move_lq_to_tq(kthread_t *);
72575a7426Spt extern void freectx_ctx(struct ctxop *);
7435a5a358SJonathan Adams /*
7535a5a358SJonathan Adams  * Create a kernel thread associated with a particular system process.  Give
7635a5a358SJonathan Adams  * it an LWP so that microstate accounting will be available for it.
7735a5a358SJonathan Adams  */
7835a5a358SJonathan Adams kthread_t *
lwp_kernel_create(proc_t * p,void (* proc)(),void * arg,int state,pri_t pri)7935a5a358SJonathan Adams lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri)
8035a5a358SJonathan Adams {
8135a5a358SJonathan Adams 	klwp_t *lwp;
8235a5a358SJonathan Adams 
8335a5a358SJonathan Adams 	VERIFY((p->p_flag & SSYS) != 0);
8435a5a358SJonathan Adams 
8535a5a358SJonathan Adams 	lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0);
8635a5a358SJonathan Adams 
8735a5a358SJonathan Adams 	VERIFY(lwp != NULL);
8835a5a358SJonathan Adams 
8935a5a358SJonathan Adams 	return (lwptot(lwp));
9035a5a358SJonathan Adams }
9135a5a358SJonathan Adams 
927c478bd9Sstevel@tonic-gate /*
937c478bd9Sstevel@tonic-gate  * Create a thread that appears to be stopped at sys_rtt.
947c478bd9Sstevel@tonic-gate  */
957c478bd9Sstevel@tonic-gate klwp_t *
lwp_create(void (* proc)(),caddr_t arg,size_t len,proc_t * p,int state,int pri,const k_sigset_t * smask,int cid,id_t lwpid)967c478bd9Sstevel@tonic-gate lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
977c478bd9Sstevel@tonic-gate     int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid)
987c478bd9Sstevel@tonic-gate {
997c478bd9Sstevel@tonic-gate 	klwp_t *lwp = NULL;
1007c478bd9Sstevel@tonic-gate 	kthread_t *t;
1017c478bd9Sstevel@tonic-gate 	kthread_t *tx;
1027c478bd9Sstevel@tonic-gate 	cpupart_t *oldpart = NULL;
1037c478bd9Sstevel@tonic-gate 	size_t	stksize;
1047c478bd9Sstevel@tonic-gate 	caddr_t lwpdata = NULL;
1057c478bd9Sstevel@tonic-gate 	processorid_t	binding;
1067c478bd9Sstevel@tonic-gate 	int err = 0;
1077c478bd9Sstevel@tonic-gate 	kproject_t *oldkpj, *newkpj;
1087c478bd9Sstevel@tonic-gate 	void *bufp = NULL;
10935a5a358SJonathan Adams 	klwp_t *curlwp;
1107c478bd9Sstevel@tonic-gate 	lwpent_t *lep;
1117c478bd9Sstevel@tonic-gate 	lwpdir_t *old_dir = NULL;
1127c478bd9Sstevel@tonic-gate 	uint_t old_dirsz = 0;
1136eb30ec3SRoger A. Faulkner 	tidhash_t *old_hash = NULL;
1147c478bd9Sstevel@tonic-gate 	uint_t old_hashsz = 0;
1156eb30ec3SRoger A. Faulkner 	ret_tidhash_t *ret_tidhash = NULL;
1167c478bd9Sstevel@tonic-gate 	int i;
1177c478bd9Sstevel@tonic-gate 	int rctlfail = 0;
1189acbbeafSnn 	boolean_t branded = 0;
119575a7426Spt 	struct ctxop *ctx = NULL;
12135a5a358SJonathan Adams 	ASSERT(cid != sysdccid);	/* system threads must start in SYS */
12235a5a358SJonathan Adams 
12335a5a358SJonathan Adams 	ASSERT(p != &p0);		/* No new LWPs in p0. */
12435a5a358SJonathan Adams 
1257c478bd9Sstevel@tonic-gate 	mutex_enter(&p->p_lock);
1267c478bd9Sstevel@tonic-gate 	mutex_enter(&p->p_zone->zone_nlwps_lock);
1277c478bd9Sstevel@tonic-gate 	/*
1287c478bd9Sstevel@tonic-gate 	 * don't enforce rctl limits on system processes
1297c478bd9Sstevel@tonic-gate 	 */
13035a5a358SJonathan Adams 	if (!CLASS_KERNEL(cid)) {
1317c478bd9Sstevel@tonic-gate 		if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl)
1327c478bd9Sstevel@tonic-gate 			if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p,
1337c478bd9Sstevel@tonic-gate 			    1, 0) & RCT_DENY)
1347c478bd9Sstevel@tonic-gate 				rctlfail = 1;
1357c478bd9Sstevel@tonic-gate 		if (p->p_task->tk_proj->kpj_nlwps >=
1367c478bd9Sstevel@tonic-gate 		    p->p_task->tk_proj->kpj_nlwps_ctl)
1377c478bd9Sstevel@tonic-gate 			if (rctl_test(rc_project_nlwps,
1387c478bd9Sstevel@tonic-gate 			    p->p_task->tk_proj->kpj_rctls, p, 1, 0)
1397c478bd9Sstevel@tonic-gate 			    & RCT_DENY)
1407c478bd9Sstevel@tonic-gate 				rctlfail = 1;
1417c478bd9Sstevel@tonic-gate 		if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl)
1427c478bd9Sstevel@tonic-gate 			if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p,
1437c478bd9Sstevel@tonic-gate 			    1, 0) & RCT_DENY)
1447c478bd9Sstevel@tonic-gate 				rctlfail = 1;
1457c478bd9Sstevel@tonic-gate 	}
1467c478bd9Sstevel@tonic-gate 	if (rctlfail) {
1477c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_zone->zone_nlwps_lock);
1487c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
1492dc692e0SJerry Jelinek 		atomic_inc_32(&p->p_zone->zone_ffcap);
1507c478bd9Sstevel@tonic-gate 		return (NULL);
1517c478bd9Sstevel@tonic-gate 	}
1527c478bd9Sstevel@tonic-gate 	p->p_task->tk_nlwps++;
1537c478bd9Sstevel@tonic-gate 	p->p_task->tk_proj->kpj_nlwps++;
1547c478bd9Sstevel@tonic-gate 	p->p_zone->zone_nlwps++;
1557c478bd9Sstevel@tonic-gate 	mutex_exit(&p->p_zone->zone_nlwps_lock);
1567c478bd9Sstevel@tonic-gate 	mutex_exit(&p->p_lock);
158d32efdadSJonathan Adams 	curlwp = ttolwp(curthread);
159d32efdadSJonathan Adams 	if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
1607c478bd9Sstevel@tonic-gate 		stksize = lwp_default_stksize;
16235a5a358SJonathan Adams 	if (CLASS_KERNEL(cid)) {
163d32efdadSJonathan Adams 		/*
164d32efdadSJonathan Adams 		 * Since we are creating an LWP in an SSYS process, we do not
165d32efdadSJonathan Adams 		 * inherit anything from the current thread's LWP.  We set
166d32efdadSJonathan Adams 		 * stksize and lwpdata to 0 in order to let thread_create()
167d32efdadSJonathan Adams 		 * allocate a regular kernel thread stack for this thread.
168d32efdadSJonathan Adams 		 */
169d32efdadSJonathan Adams 		curlwp = NULL;
170d32efdadSJonathan Adams 		stksize = 0;
171d32efdadSJonathan Adams 		lwpdata = NULL;
17235a5a358SJonathan Adams 
17335a5a358SJonathan Adams 	} else if (stksize == lwp_default_stksize) {
174d32efdadSJonathan Adams 		/*
175d32efdadSJonathan Adams 		 * Try to reuse an <lwp,stack> from the LWP deathrow.
176d32efdadSJonathan Adams 		 */
1777c478bd9Sstevel@tonic-gate 		if (lwp_reapcnt > 0) {
1787c478bd9Sstevel@tonic-gate 			mutex_enter(&reaplock);
1797c478bd9Sstevel@tonic-gate 			if ((t = lwp_deathrow) != NULL) {
1807c478bd9Sstevel@tonic-gate 				ASSERT(t->t_swap);
1817c478bd9Sstevel@tonic-gate 				lwp_deathrow = t->t_forw;
1827c478bd9Sstevel@tonic-gate 				lwp_reapcnt--;
1837c478bd9Sstevel@tonic-gate 				lwpdata = t->t_swap;
1847c478bd9Sstevel@tonic-gate 				lwp = t->t_lwp;
185575a7426Spt 				ctx = t->t_ctx;
1867c478bd9Sstevel@tonic-gate 				t->t_swap = NULL;
1877c478bd9Sstevel@tonic-gate 				t->t_lwp = NULL;
188575a7426Spt 				t->t_ctx = NULL;
189575a7426Spt 				reapq_move_lq_to_tq(t);
190575a7426Spt 			}
191575a7426Spt 			mutex_exit(&reaplock);
192575a7426Spt 			if (lwp != NULL) {
193575a7426Spt 				lwp_stk_fini(lwp);
194575a7426Spt 			}
195575a7426Spt 			if (ctx != NULL) {
196575a7426Spt 				freectx_ctx(ctx);
1977c478bd9Sstevel@tonic-gate 			}
1987c478bd9Sstevel@tonic-gate 		}
1997c478bd9Sstevel@tonic-gate 		if (lwpdata == NULL &&
2007c478bd9Sstevel@tonic-gate 		    (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) {
2017c478bd9Sstevel@tonic-gate 			mutex_enter(&p->p_lock);
2027c478bd9Sstevel@tonic-gate 			mutex_enter(&p->p_zone->zone_nlwps_lock);
2037c478bd9Sstevel@tonic-gate 			p->p_task->tk_nlwps--;
2047c478bd9Sstevel@tonic-gate 			p->p_task->tk_proj->kpj_nlwps--;
2057c478bd9Sstevel@tonic-gate 			p->p_zone->zone_nlwps--;
2067c478bd9Sstevel@tonic-gate 			mutex_exit(&p->p_zone->zone_nlwps_lock);
2077c478bd9Sstevel@tonic-gate 			mutex_exit(&p->p_lock);
2082dc692e0SJerry Jelinek 			atomic_inc_32(&p->p_zone->zone_ffnomem);
2097c478bd9Sstevel@tonic-gate 			return (NULL);
2107c478bd9Sstevel@tonic-gate 		}
2117c478bd9Sstevel@tonic-gate 	} else {
2127c478bd9Sstevel@tonic-gate 		stksize = roundup(stksize, PAGESIZE);
2137c478bd9Sstevel@tonic-gate 		if ((lwpdata = (caddr_t)segkp_get(segkp, stksize,
2147c478bd9Sstevel@tonic-gate 		    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) {
2157c478bd9Sstevel@tonic-gate 			mutex_enter(&p->p_lock);
2167c478bd9Sstevel@tonic-gate 			mutex_enter(&p->p_zone->zone_nlwps_lock);
2177c478bd9Sstevel@tonic-gate 			p->p_task->tk_nlwps--;
2187c478bd9Sstevel@tonic-gate 			p->p_task->tk_proj->kpj_nlwps--;
2197c478bd9Sstevel@tonic-gate 			p->p_zone->zone_nlwps--;
2207c478bd9Sstevel@tonic-gate 			mutex_exit(&p->p_zone->zone_nlwps_lock);
2217c478bd9Sstevel@tonic-gate 			mutex_exit(&p->p_lock);
2222dc692e0SJerry Jelinek 			atomic_inc_32(&p->p_zone->zone_ffnomem);
2237c478bd9Sstevel@tonic-gate 			return (NULL);
2247c478bd9Sstevel@tonic-gate 		}
2257c478bd9Sstevel@tonic-gate 	}
2277c478bd9Sstevel@tonic-gate 	/*
2287c478bd9Sstevel@tonic-gate 	 * Create a thread, initializing the stack pointer
2297c478bd9Sstevel@tonic-gate 	 */
2307c478bd9Sstevel@tonic-gate 	t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri);
232d32efdadSJonathan Adams 	/*
233d32efdadSJonathan Adams 	 * If a non-NULL stack base is passed in, thread_create() assumes
234d32efdadSJonathan Adams 	 * that the stack might be statically allocated (as opposed to being
235d32efdadSJonathan Adams 	 * allocated from segkp), and so it does not set t_swap.  Since
236d32efdadSJonathan Adams 	 * the lwpdata was allocated from segkp, we must set t_swap to point
237d32efdadSJonathan Adams 	 * to it ourselves.
238d32efdadSJonathan Adams 	 *
239d32efdadSJonathan Adams 	 * This would be less confusing if t_swap had a better name; it really
240d32efdadSJonathan Adams 	 * indicates that the stack is allocated from segkp, regardless of
241d32efdadSJonathan Adams 	 * whether or not it is swappable.
242d32efdadSJonathan Adams 	 */
243d32efdadSJonathan Adams 	if (lwpdata != NULL) {
244d32efdadSJonathan Adams 		ASSERT(!CLASS_KERNEL(cid));
245d32efdadSJonathan Adams 		ASSERT(t->t_swap == NULL);
246d32efdadSJonathan Adams 		t->t_swap = lwpdata;	/* Start of page-able data */
247d32efdadSJonathan Adams 	}
248d32efdadSJonathan Adams 
249d32efdadSJonathan Adams 	/*
250d32efdadSJonathan Adams 	 * If the stack and lwp can be reused, mark the thread as such.
251d32efdadSJonathan Adams 	 * When we get to reapq_add() from resume_from_zombie(), these
252d32efdadSJonathan Adams 	 * threads will go onto lwp_deathrow instead of thread_deathrow.
253d32efdadSJonathan Adams 	 */
254d32efdadSJonathan Adams 	if (!CLASS_KERNEL(cid) && stksize == lwp_default_stksize)
255d32efdadSJonathan Adams 		t->t_flag |= T_LWPREUSE;
256d32efdadSJonathan Adams 
2577c478bd9Sstevel@tonic-gate 	if (lwp == NULL)
2587c478bd9Sstevel@tonic-gate 		lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP);
2597c478bd9Sstevel@tonic-gate 	bzero(lwp, sizeof (*lwp));
2607c478bd9Sstevel@tonic-gate 	t->t_lwp = lwp;
2627c478bd9Sstevel@tonic-gate 	t->t_hold = *smask;
2637c478bd9Sstevel@tonic-gate 	lwp->lwp_thread = t;
2647c478bd9Sstevel@tonic-gate 	lwp->lwp_procp = p;
2657c478bd9Sstevel@tonic-gate 	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
2667c478bd9Sstevel@tonic-gate 	if (curlwp != NULL && curlwp->lwp_childstksz != 0)
2677c478bd9Sstevel@tonic-gate 		lwp->lwp_childstksz = curlwp->lwp_childstksz;
2697c478bd9Sstevel@tonic-gate 	t->t_stk = lwp_stk_init(lwp, t->t_stk);
2707c478bd9Sstevel@tonic-gate 	thread_load(t, proc, arg, len);
2727c478bd9Sstevel@tonic-gate 	/*
2737c478bd9Sstevel@tonic-gate 	 * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect.
2747c478bd9Sstevel@tonic-gate 	 */
275e0cf54a5SRoger A. Faulkner 	if (p->p_rprof_cyclic != CYCLIC_NONE)
2767c478bd9Sstevel@tonic-gate 		t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP);
2787c478bd9Sstevel@tonic-gate 	if (cid != NOCLASS)
2797c478bd9Sstevel@tonic-gate 		(void) CL_ALLOC(&bufp, cid, KM_SLEEP);
2817c478bd9Sstevel@tonic-gate 	/*
2827c478bd9Sstevel@tonic-gate 	 * Allocate an lwp directory entry for the new lwp.
2837c478bd9Sstevel@tonic-gate 	 */
2847c478bd9Sstevel@tonic-gate 	lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);
2867c478bd9Sstevel@tonic-gate 	mutex_enter(&p->p_lock);
2877c478bd9Sstevel@tonic-gate grow:
2887c478bd9Sstevel@tonic-gate 	/*
2897c478bd9Sstevel@tonic-gate 	 * Grow the lwp (thread) directory and lwpid hash table if necessary.
2907c478bd9Sstevel@tonic-gate 	 * A note on the growth algorithm:
2917c478bd9Sstevel@tonic-gate 	 *	The new lwp directory size is computed as:
2927c478bd9Sstevel@tonic-gate 	 *		new = 2 * old + 2
2937c478bd9Sstevel@tonic-gate 	 *	Starting with an initial size of 2 (see exec_common()),
2947c478bd9Sstevel@tonic-gate 	 *	this yields numbers that are a power of two minus 2:
2957c478bd9Sstevel@tonic-gate 	 *		2, 6, 14, 30, 62, 126, 254, 510, 1022, ...
2967c478bd9Sstevel@tonic-gate 	 *	The size of the lwpid hash table must be a power of two
2977c478bd9Sstevel@tonic-gate 	 *	and must be commensurate in size with the lwp directory
2987c478bd9Sstevel@tonic-gate 	 *	so that hash bucket chains remain short.  Therefore,
2997c478bd9Sstevel@tonic-gate 	 *	the lwpid hash table size is computed as:
3007c478bd9Sstevel@tonic-gate 	 *		hashsz = (dirsz + 2) / 2
3017c478bd9Sstevel@tonic-gate 	 *	which leads to these hash table sizes corresponding to
3027c478bd9Sstevel@tonic-gate 	 *	the above directory sizes:
3037c478bd9Sstevel@tonic-gate 	 *		2, 4, 8, 16, 32, 64, 128, 256, 512, ...
3046eb30ec3SRoger A. Faulkner 	 * A note on growing the hash table:
3056eb30ec3SRoger A. Faulkner 	 *	For performance reasons, code in lwp_unpark() does not
3066eb30ec3SRoger A. Faulkner 	 *	acquire curproc->p_lock when searching the hash table.
3076eb30ec3SRoger A. Faulkner 	 *	Rather, it calls lwp_hash_lookup_and_lock() which
3086eb30ec3SRoger A. Faulkner 	 *	acquires only the individual hash bucket lock, taking
3096eb30ec3SRoger A. Faulkner 	 *	care to deal with reallocation of the hash table
3106eb30ec3SRoger A. Faulkner 	 *	during the time it takes to acquire the lock.
3116eb30ec3SRoger A. Faulkner 	 *
3126eb30ec3SRoger A. Faulkner 	 *	This is sufficient to protect the integrity of the
3136eb30ec3SRoger A. Faulkner 	 *	hash table, but it requires us to acquire all of the
3146eb30ec3SRoger A. Faulkner 	 *	old hash bucket locks before growing the hash table
3156eb30ec3SRoger A. Faulkner 	 *	and to release them afterwards.  It also requires us
3166eb30ec3SRoger A. Faulkner 	 *	not to free the old hash table because some thread
3176eb30ec3SRoger A. Faulkner 	 *	in lwp_hash_lookup_and_lock() might still be trying
3186eb30ec3SRoger A. Faulkner 	 *	to acquire the old bucket lock.
3196eb30ec3SRoger A. Faulkner 	 *
3206eb30ec3SRoger A. Faulkner 	 *	So we adopt the tactic of keeping all of the retired
3216eb30ec3SRoger A. Faulkner 	 *	hash tables on a linked list, so they can be safely
3226eb30ec3SRoger A. Faulkner 	 *	freed when the process exits or execs.
3236eb30ec3SRoger A. Faulkner 	 *
3246eb30ec3SRoger A. Faulkner 	 *	Because the hash table grows in powers of two, the
3256eb30ec3SRoger A. Faulkner 	 *	total size of all of the hash tables will be slightly
3266eb30ec3SRoger A. Faulkner 	 *	less than twice the size of the largest hash table.
3277c478bd9Sstevel@tonic-gate 	 */
3287c478bd9Sstevel@tonic-gate 	while (p->p_lwpfree == NULL) {
3297c478bd9Sstevel@tonic-gate 		uint_t dirsz = p->p_lwpdir_sz;
3307c478bd9Sstevel@tonic-gate 		lwpdir_t *new_dir;
3316eb30ec3SRoger A. Faulkner 		uint_t new_dirsz;
3327c478bd9Sstevel@tonic-gate 		lwpdir_t *ldp;
3336eb30ec3SRoger A. Faulkner 		tidhash_t *new_hash;
3346eb30ec3SRoger A. Faulkner 		uint_t new_hashsz;
3367c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
3386eb30ec3SRoger A. Faulkner 		/*
3396eb30ec3SRoger A. Faulkner 		 * Prepare to remember the old p_tidhash for later
3406eb30ec3SRoger A. Faulkner 		 * kmem_free()ing when the process exits or execs.
3416eb30ec3SRoger A. Faulkner 		 */
3426eb30ec3SRoger A. Faulkner 		if (ret_tidhash == NULL)
3436eb30ec3SRoger A. Faulkner 			ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t),
3446eb30ec3SRoger A. Faulkner 			    KM_SLEEP);
3456eb30ec3SRoger A. Faulkner 		if (old_dir != NULL)
3467c478bd9Sstevel@tonic-gate 			kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
3476eb30ec3SRoger A. Faulkner 		if (old_hash != NULL)
3487c478bd9Sstevel@tonic-gate 			kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
3496eb30ec3SRoger A. Faulkner 
3507c478bd9Sstevel@tonic-gate 		new_dirsz = 2 * dirsz + 2;
3517c478bd9Sstevel@tonic-gate 		new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP);
3527c478bd9Sstevel@tonic-gate 		for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++)
3537c478bd9Sstevel@tonic-gate 			ldp->ld_next = ldp + 1;
3547c478bd9Sstevel@tonic-gate 		new_hashsz = (new_dirsz + 2) / 2;
3556eb30ec3SRoger A. Faulkner 		new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t),
356575a7426Spt 		    KM_SLEEP);
3587c478bd9Sstevel@tonic-gate 		mutex_enter(&p->p_lock);
3597c478bd9Sstevel@tonic-gate 		if (p == curproc)
3607c478bd9Sstevel@tonic-gate 			prbarrier(p);
3627c478bd9Sstevel@tonic-gate 		if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) {
3637c478bd9Sstevel@tonic-gate 			/*
3647c478bd9Sstevel@tonic-gate 			 * Someone else beat us to it or some lwp exited.
3657c478bd9Sstevel@tonic-gate 			 * Set up to free our memory and take a lap.
3667c478bd9Sstevel@tonic-gate 			 */
3677c478bd9Sstevel@tonic-gate 			old_dir = new_dir;
3687c478bd9Sstevel@tonic-gate 			old_dirsz = new_dirsz;
3697c478bd9Sstevel@tonic-gate 			old_hash = new_hash;
3707c478bd9Sstevel@tonic-gate 			old_hashsz = new_hashsz;
3717c478bd9Sstevel@tonic-gate 		} else {
3726eb30ec3SRoger A. Faulkner 			/*
3736eb30ec3SRoger A. Faulkner 			 * For the benefit of lwp_hash_lookup_and_lock(),
3746eb30ec3SRoger A. Faulkner 			 * called from lwp_unpark(), which searches the
3756eb30ec3SRoger A. Faulkner 			 * tid hash table without acquiring p->p_lock,
3766eb30ec3SRoger A. Faulkner 			 * we must acquire all of the tid hash table
3776eb30ec3SRoger A. Faulkner 			 * locks before replacing p->p_tidhash.
3786eb30ec3SRoger A. Faulkner 			 */
3797c478bd9Sstevel@tonic-gate 			old_hash = p->p_tidhash;
3807c478bd9Sstevel@tonic-gate 			old_hashsz = p->p_tidhash_sz;
3816eb30ec3SRoger A. Faulkner 			for (i = 0; i < old_hashsz; i++) {
3826eb30ec3SRoger A. Faulkner 				mutex_enter(&old_hash[i].th_lock);
3836eb30ec3SRoger A. Faulkner 				mutex_enter(&new_hash[i].th_lock);
3846eb30ec3SRoger A. Faulkner 			}
3856eb30ec3SRoger A. Faulkner 
3867c478bd9Sstevel@tonic-gate 			/*
3877c478bd9Sstevel@tonic-gate 			 * We simply hash in all of the old directory entries.
3887c478bd9Sstevel@tonic-gate 			 * This works because the old directory has no empty
3897c478bd9Sstevel@tonic-gate 			 * slots and the new hash table starts out empty.
3907c478bd9Sstevel@tonic-gate 			 * This reproduces the original directory ordering
3917c478bd9Sstevel@tonic-gate 			 * (required for /proc directory semantics).
3927c478bd9Sstevel@tonic-gate 			 */
3936eb30ec3SRoger A. Faulkner 			old_dir = p->p_lwpdir;
3946eb30ec3SRoger A. Faulkner 			old_dirsz = p->p_lwpdir_sz;
3956eb30ec3SRoger A. Faulkner 			p->p_lwpdir = new_dir;
3966eb30ec3SRoger A. Faulkner 			p->p_lwpfree = new_dir;
3976eb30ec3SRoger A. Faulkner 			p->p_lwpdir_sz = new_dirsz;
3986eb30ec3SRoger A. Faulkner 			for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++)
3996eb30ec3SRoger A. Faulkner 				lwp_hash_in(p, ldp->ld_entry,
4006eb30ec3SRoger A. Faulkner 				    new_hash, new_hashsz, 0);
4016eb30ec3SRoger A. Faulkner 
4026eb30ec3SRoger A. Faulkner 			/*
4036eb30ec3SRoger A. Faulkner 			 * Remember the old hash table along with all
4046eb30ec3SRoger A. Faulkner 			 * of the previously-remembered hash tables.
4056eb30ec3SRoger A. Faulkner 			 * We will free them at process exit or exec.
4066eb30ec3SRoger A. Faulkner 			 */
4076eb30ec3SRoger A. Faulkner 			ret_tidhash->rth_tidhash = old_hash;
4086eb30ec3SRoger A. Faulkner 			ret_tidhash->rth_tidhash_sz = old_hashsz;
4096eb30ec3SRoger A. Faulkner 			ret_tidhash->rth_next = p->p_ret_tidhash;
4106eb30ec3SRoger A. Faulkner 			p->p_ret_tidhash = ret_tidhash;
4116eb30ec3SRoger A. Faulkner 
4127c478bd9Sstevel@tonic-gate 			/*
4136eb30ec3SRoger A. Faulkner 			 * Now establish the new tid hash table.
4146eb30ec3SRoger A. Faulkner 			 * As soon as we assign p->p_tidhash,
4156eb30ec3SRoger A. Faulkner 			 * code in lwp_unpark() can start using it.
4167c478bd9Sstevel@tonic-gate 			 */
4176eb30ec3SRoger A. Faulkner 			membar_producer();
4186eb30ec3SRoger A. Faulkner 			p->p_tidhash = new_hash;
4196eb30ec3SRoger A. Faulkner 
4206eb30ec3SRoger A. Faulkner 			/*
4216eb30ec3SRoger A. Faulkner 			 * It is necessary that p_tidhash reach global
4226eb30ec3SRoger A. Faulkner 			 * visibility before p_tidhash_sz.  Otherwise,
4236eb30ec3SRoger A. Faulkner 			 * code in lwp_hash_lookup_and_lock() could
4246eb30ec3SRoger A. Faulkner 			 * index into the old p_tidhash using the new
4256eb30ec3SRoger A. Faulkner 			 * p_tidhash_sz and thereby access invalid data.
4266eb30ec3SRoger A. Faulkner 			 */
4276eb30ec3SRoger A. Faulkner 			membar_producer();
4286eb30ec3SRoger A. Faulkner 			p->p_tidhash_sz = new_hashsz;
4296eb30ec3SRoger A. Faulkner 
4306eb30ec3SRoger A. Faulkner 			/*
4316eb30ec3SRoger A. Faulkner 			 * Release the locks; allow lwp_unpark() to carry on.
4326eb30ec3SRoger A. Faulkner 			 */
4336eb30ec3SRoger A. Faulkner 			for (i = 0; i < old_hashsz; i++) {
4346eb30ec3SRoger A. Faulkner 				mutex_exit(&old_hash[i].th_lock);
4356eb30ec3SRoger A. Faulkner 				mutex_exit(&new_hash[i].th_lock);
4366eb30ec3SRoger A. Faulkner 			}
4376eb30ec3SRoger A. Faulkner 
4386eb30ec3SRoger A. Faulkner 			/*
4396eb30ec3SRoger A. Faulkner 			 * Avoid freeing these objects below.
4406eb30ec3SRoger A. Faulkner 			 */
4416eb30ec3SRoger A. Faulkner 			ret_tidhash = NULL;
4426eb30ec3SRoger A. Faulkner 			old_hash = NULL;
4436eb30ec3SRoger A. Faulkner 			old_hashsz = 0;
4447c478bd9Sstevel@tonic-gate 		}
4457c478bd9Sstevel@tonic-gate 	}
4477c478bd9Sstevel@tonic-gate 	/*
4487c478bd9Sstevel@tonic-gate 	 * Block the process against /proc while we manipulate p->p_tlist,
4497c478bd9Sstevel@tonic-gate 	 * unless lwp_create() was called by /proc for the PCAGENT operation.
4507c478bd9Sstevel@tonic-gate 	 * We want to do this early enough so that we don't drop p->p_lock
4517c478bd9Sstevel@tonic-gate 	 * until the thread is put on the p->p_tlist.
4527c478bd9Sstevel@tonic-gate 	 */
4537c478bd9Sstevel@tonic-gate 	if (p == curproc) {
4547c478bd9Sstevel@tonic-gate 		prbarrier(p);
4557c478bd9Sstevel@tonic-gate 		/*
4567c478bd9Sstevel@tonic-gate 		 * If the current lwp has been requested to stop, do so now.
4577c478bd9Sstevel@tonic-gate 		 * Otherwise we have a race condition between /proc attempting
4587c478bd9Sstevel@tonic-gate 		 * to stop the process and this thread creating a new lwp
4597c478bd9Sstevel@tonic-gate 		 * that was not seen when the /proc PCSTOP request was issued.
4607c478bd9Sstevel@tonic-gate 		 * We rely on stop() to call prbarrier(p) before returning.
4617c478bd9Sstevel@tonic-gate 		 */
4627c478bd9Sstevel@tonic-gate 		while ((curthread->t_proc_flag & TP_PRSTOP) &&
463936e3a33SGangadhar Mylapuram 		    !ttolwp(curthread)->lwp_nostop) {
464936e3a33SGangadhar Mylapuram 			/*
465936e3a33SGangadhar Mylapuram 			 * We called pool_barrier_enter() before calling
466936e3a33SGangadhar Mylapuram 			 * here to lwp_create(). We have to call
467936e3a33SGangadhar Mylapuram 			 * pool_barrier_exit() before stopping.
468936e3a33SGangadhar Mylapuram 			 */
469936e3a33SGangadhar Mylapuram 			pool_barrier_exit();
470936e3a33SGangadhar Mylapuram 			prbarrier(p);
4717c478bd9Sstevel@tonic-gate 			stop(PR_REQUESTED, 0);
472936e3a33SGangadhar Mylapuram 			/*
473936e3a33SGangadhar Mylapuram 			 * And we have to repeat the call to
474936e3a33SGangadhar Mylapuram 			 * pool_barrier_enter after stopping.
475936e3a33SGangadhar Mylapuram 			 */
476936e3a33SGangadhar Mylapuram 			pool_barrier_enter();
477936e3a33SGangadhar Mylapuram 			prbarrier(p);
478936e3a33SGangadhar Mylapuram 		}
4807c478bd9Sstevel@tonic-gate 		/*
4817c478bd9Sstevel@tonic-gate 		 * If process is exiting, there could be a race between
4827c478bd9Sstevel@tonic-gate 		 * the agent lwp creation and the new lwp currently being
4837c478bd9Sstevel@tonic-gate 		 * created. So to prevent this race lwp creation is failed
4847c478bd9Sstevel@tonic-gate 		 * if the process is exiting.
4857c478bd9Sstevel@tonic-gate 		 */
4867c478bd9Sstevel@tonic-gate 		if (p->p_flag & (SEXITLWPS|SKILLED)) {
4877c478bd9Sstevel@tonic-gate 			err = 1;
4887c478bd9Sstevel@tonic-gate 			goto error;
4897c478bd9Sstevel@tonic-gate 		}
4917c478bd9Sstevel@tonic-gate 		/*
4927c478bd9Sstevel@tonic-gate 		 * Since we might have dropped p->p_lock, the
4937c478bd9Sstevel@tonic-gate 		 * lwp directory free list might have changed.
4947c478bd9Sstevel@tonic-gate 		 */
4957c478bd9Sstevel@tonic-gate 		if (p->p_lwpfree == NULL)
4967c478bd9Sstevel@tonic-gate 			goto grow;
4977c478bd9Sstevel@tonic-gate 	}
4997c478bd9Sstevel@tonic-gate 	kpreempt_disable();	/* can't grab cpu_lock here */
5017c478bd9Sstevel@tonic-gate 	/*
50235a5a358SJonathan Adams 	 * Inherit processor and processor set bindings from curthread.
50335a5a358SJonathan Adams 	 *
50435a5a358SJonathan Adams 	 * For kernel LWPs, we do not inherit processor set bindings at
50535a5a358SJonathan Adams 	 * process creation time (i.e. when p != curproc).  After the
50635a5a358SJonathan Adams 	 * kernel process is created, any subsequent LWPs must be created
50735a5a358SJonathan Adams 	 * by threads in the kernel process, at which point we *will*
50835a5a358SJonathan Adams 	 * inherit processor set bindings.
5097c478bd9Sstevel@tonic-gate 	 */
51035a5a358SJonathan Adams 	if (CLASS_KERNEL(cid) && p != curproc) {
5117c478bd9Sstevel@tonic-gate 		t->t_bind_cpu = binding = PBIND_NONE;
5127c478bd9Sstevel@tonic-gate 		t->t_cpupart = oldpart = &cp_default;
5137c478bd9Sstevel@tonic-gate 		t->t_bind_pset = PS_NONE;
5140b70c467Sakolb 		t->t_bindflag = (uchar_t)default_binding_mode;
5157c478bd9Sstevel@tonic-gate 	} else {
5167c478bd9Sstevel@tonic-gate 		binding = curthread->t_bind_cpu;
5177c478bd9Sstevel@tonic-gate 		t->t_bind_cpu = binding;
5187c478bd9Sstevel@tonic-gate 		oldpart = t->t_cpupart;
5197c478bd9Sstevel@tonic-gate 		t->t_cpupart = curthread->t_cpupart;
5207c478bd9Sstevel@tonic-gate 		t->t_bind_pset = curthread->t_bind_pset;
5210b70c467Sakolb 		t->t_bindflag = curthread->t_bindflag |
5220b70c467Sakolb 		    (uchar_t)default_binding_mode;
5237c478bd9Sstevel@tonic-gate 	}
5257c478bd9Sstevel@tonic-gate 	/*
5267c478bd9Sstevel@tonic-gate 	 * thread_create() initializes this thread's home lgroup to the root.
5277c478bd9Sstevel@tonic-gate 	 * Choose a more suitable lgroup, since this thread is associated
5287c478bd9Sstevel@tonic-gate 	 * with an lwp.
5297c478bd9Sstevel@tonic-gate 	 */
5307c478bd9Sstevel@tonic-gate 	ASSERT(oldpart != NULL);
5317c478bd9Sstevel@tonic-gate 	if (binding != PBIND_NONE && t->t_affinitycnt == 0) {
5327c478bd9Sstevel@tonic-gate 		t->t_bound_cpu = cpu[binding];
5337c478bd9Sstevel@tonic-gate 		if (t->t_lpl != t->t_bound_cpu->cpu_lpl)
5347c478bd9Sstevel@tonic-gate 			lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1);
535c2e5330eSJonathan Adams 	} else if (CLASS_KERNEL(cid)) {
536c2e5330eSJonathan Adams 		/*
537aab2fe41SJonathan Adams 		 * Kernel threads are always in the root lgrp.
538c2e5330eSJonathan Adams 		 */
539c2e5330eSJonathan Adams 		lgrp_move_thread(t,
540aab2fe41SJonathan Adams 		    &t->t_cpupart->cp_lgrploads[LGRP_ROOTID], 1);
5417c478bd9Sstevel@tonic-gate 	} else {
5427c478bd9Sstevel@tonic-gate 		lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1);
5437c478bd9Sstevel@tonic-gate 	}
5457c478bd9Sstevel@tonic-gate 	kpreempt_enable();
5477c478bd9Sstevel@tonic-gate 	/*
5487c478bd9Sstevel@tonic-gate 	 * make sure lpl points to our own partition
5497c478bd9Sstevel@tonic-gate 	 */
5507c478bd9Sstevel@tonic-gate 	ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads);
5517c478bd9Sstevel@tonic-gate 	ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads +
5527c478bd9Sstevel@tonic-gate 	    t->t_cpupart->cp_nlgrploads);
5547c478bd9Sstevel@tonic-gate 	/*
5557c478bd9Sstevel@tonic-gate 	 * It is safe to point the thread to the new project without holding it
5567c478bd9Sstevel@tonic-gate 	 * since we're holding the target process' p_lock here and therefore
5577c478bd9Sstevel@tonic-gate 	 * we're guaranteed that it will not move to another project.
5587c478bd9Sstevel@tonic-gate 	 */
559aab2fe41SJonathan Adams 	newkpj = p->p_task->tk_proj;
5607c478bd9Sstevel@tonic-gate 	oldkpj = ttoproj(t);
5617c478bd9Sstevel@tonic-gate 	if (newkpj != oldkpj) {
5627c478bd9Sstevel@tonic-gate 		t->t_proj = newkpj;
5637c478bd9Sstevel@tonic-gate 		(void) project_hold(newkpj);
5647c478bd9Sstevel@tonic-gate 		project_rele(oldkpj);
5657c478bd9Sstevel@tonic-gate 	}
5677c478bd9Sstevel@tonic-gate 	if (cid != NOCLASS) {
5687c478bd9Sstevel@tonic-gate 		/*
5697c478bd9Sstevel@tonic-gate 		 * If the lwp is being created in the current process
5707c478bd9Sstevel@tonic-gate 		 * and matches the current thread's scheduling class,
5717c478bd9Sstevel@tonic-gate 		 * we should propagate the current thread's scheduling
5727c478bd9Sstevel@tonic-gate 		 * parameters by calling CL_FORK.  Otherwise just use
5737c478bd9Sstevel@tonic-gate 		 * the defaults by calling CL_ENTERCLASS.
5747c478bd9Sstevel@tonic-gate 		 */
5757c478bd9Sstevel@tonic-gate 		if (p != curproc || curthread->t_cid != cid) {
5767c478bd9Sstevel@tonic-gate 			err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp);
5777c478bd9Sstevel@tonic-gate 			t->t_pri = pri;	/* CL_ENTERCLASS may have changed it */
578d4204c85Sraf 			/*
579d4204c85Sraf 			 * We don't call schedctl_set_cidpri(t) here
580d4204c85Sraf 			 * because the schedctl data is not yet set
581d4204c85Sraf 			 * up for the newly-created lwp.
582d4204c85Sraf 			 */
5837c478bd9Sstevel@tonic-gate 		} else {
5847c478bd9Sstevel@tonic-gate 			t->t_clfuncs = &(sclass[cid].cl_funcs->thread);
5857c478bd9Sstevel@tonic-gate 			err = CL_FORK(curthread, t, bufp);
5867c478bd9Sstevel@tonic-gate 			t->t_cid = cid;
5877c478bd9Sstevel@tonic-gate 		}
5882dc692e0SJerry Jelinek 		if (err) {
5892dc692e0SJerry Jelinek 			atomic_inc_32(&p->p_zone->zone_ffmisc);
5907c478bd9Sstevel@tonic-gate 			goto error;
5912dc692e0SJerry Jelinek 		} else {
5927c478bd9Sstevel@tonic-gate 			bufp = NULL;
5932dc692e0SJerry Jelinek 		}
5947c478bd9Sstevel@tonic-gate 	}
5967c478bd9Sstevel@tonic-gate 	/*
5977c478bd9Sstevel@tonic-gate 	 * If we were given an lwpid then use it, else allocate one.
5987c478bd9Sstevel@tonic-gate 	 */
5997c478bd9Sstevel@tonic-gate 	if (lwpid != 0)
6007c478bd9Sstevel@tonic-gate 		t->t_tid = lwpid;
6017c478bd9Sstevel@tonic-gate 	else {
6027c478bd9Sstevel@tonic-gate 		/*
6037c478bd9Sstevel@tonic-gate 		 * lwp/thread id 0 is never valid; reserved for special checks.
6047c478bd9Sstevel@tonic-gate 		 * lwp/thread id 1 is reserved for the main thread.
6057c478bd9Sstevel@tonic-gate 		 * Start again at 2 when INT_MAX has been reached
6067c478bd9Sstevel@tonic-gate 		 * (id_t is a signed 32-bit integer).
6077c478bd9Sstevel@tonic-gate 		 */
6087c478bd9Sstevel@tonic-gate 		id_t prev_id = p->p_lwpid;	/* last allocated tid */
6107c478bd9Sstevel@tonic-gate 		do {			/* avoid lwpid duplication */
6117c478bd9Sstevel@tonic-gate 			if (p->p_lwpid == INT_MAX) {
6127c478bd9Sstevel@tonic-gate 				p->p_flag |= SLWPWRAP;
6137c478bd9Sstevel@tonic-gate 				p->p_lwpid = 1;
6147c478bd9Sstevel@tonic-gate 			}
6157c478bd9Sstevel@tonic-gate 			if ((t->t_tid = ++p->p_lwpid) == prev_id) {
6167c478bd9Sstevel@tonic-gate 				/*
6177c478bd9Sstevel@tonic-gate 				 * All lwpids are allocated; fail the request.
6187c478bd9Sstevel@tonic-gate 				 */
6197c478bd9Sstevel@tonic-gate 				err = 1;
6202dc692e0SJerry Jelinek 				atomic_inc_32(&p->p_zone->zone_ffnoproc);
6217c478bd9Sstevel@tonic-gate 				goto error;
6227c478bd9Sstevel@tonic-gate 			}
6237c478bd9Sstevel@tonic-gate 			/*
6247c478bd9Sstevel@tonic-gate 			 * We only need to worry about colliding with an id
6257c478bd9Sstevel@tonic-gate 			 * that's already in use if this process has
6267c478bd9Sstevel@tonic-gate 			 * cycled through all available lwp ids.
6277c478bd9Sstevel@tonic-gate 			 */
6287c478bd9Sstevel@tonic-gate 			if ((p->p_flag & SLWPWRAP) == 0)
6297c478bd9Sstevel@tonic-gate 				break;
6307c478bd9Sstevel@tonic-gate 		} while (lwp_hash_lookup(p, t->t_tid) != NULL);
6317c478bd9Sstevel@tonic-gate 	}
6339acbbeafSnn 	/*
6349acbbeafSnn 	 * If this is a branded process, let the brand do any necessary lwp
6359acbbeafSnn 	 * initialization.
6369acbbeafSnn 	 */
6379acbbeafSnn 	if (PROC_IS_BRANDED(p)) {
6389acbbeafSnn 		if (BROP(p)->b_initlwp(lwp)) {
6399acbbeafSnn 			err = 1;
6402dc692e0SJerry Jelinek 			atomic_inc_32(&p->p_zone->zone_ffmisc);
6419acbbeafSnn 			goto error;
6429acbbeafSnn 		}
6439acbbeafSnn 		branded = 1;
6449acbbeafSnn 	}
6462cb27123Saguzovsk 	if (t->t_tid == 1) {
6472cb27123Saguzovsk 		kpreempt_disable();
6482cb27123Saguzovsk 		ASSERT(t->t_lpl != NULL);
6492cb27123Saguzovsk 		p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid;
6502cb27123Saguzovsk 		kpreempt_enable();
6512cb27123Saguzovsk 		if (p->p_tr_lgrpid != LGRP_NONE &&
6522cb27123Saguzovsk 		    p->p_tr_lgrpid != p->p_t1_lgrpid) {
6532cb27123Saguzovsk 			lgrp_update_trthr_migrations(1);
6542cb27123Saguzovsk 		}
6552cb27123Saguzovsk 	}
6577c478bd9Sstevel@tonic-gate 	p->p_lwpcnt++;
6587c478bd9Sstevel@tonic-gate 	t->t_waitfor = -1;
6607c478bd9Sstevel@tonic-gate 	/*
6617c478bd9Sstevel@tonic-gate 	 * Turn microstate accounting on for thread if on for process.
6627c478bd9Sstevel@tonic-gate 	 */
6637c478bd9Sstevel@tonic-gate 	if (p->p_flag & SMSACCT)
6647c478bd9Sstevel@tonic-gate 		t->t_proc_flag |= TP_MSACCT;
6667c478bd9Sstevel@tonic-gate 	/*
6677c478bd9Sstevel@tonic-gate 	 * If the process has watchpoints, mark the new thread as such.
6687c478bd9Sstevel@tonic-gate 	 */
6697c478bd9Sstevel@tonic-gate 	if (pr_watch_active(p))
6707c478bd9Sstevel@tonic-gate 		watch_enable(t);
6727c478bd9Sstevel@tonic-gate 	/*
6737c478bd9Sstevel@tonic-gate 	 * The lwp is being created in the stopped state.
6747c478bd9Sstevel@tonic-gate 	 * We set all the necessary flags to indicate that fact here.
6757c478bd9Sstevel@tonic-gate 	 * We omit the TS_CREATE flag from t_schedflag so that the lwp
6767c478bd9Sstevel@tonic-gate 	 * cannot be set running until the caller is finished with it,
6777c478bd9Sstevel@tonic-gate 	 * even if lwp_continue() is called on it after we drop p->p_lock.
6787c478bd9Sstevel@tonic-gate 	 * When the caller is finished with the newly-created lwp,
6797c478bd9Sstevel@tonic-gate 	 * the caller must call lwp_create_done() to allow the lwp
6807c478bd9Sstevel@tonic-gate 	 * to be set running.  If the TP_HOLDLWP is left set, the
6817c478bd9Sstevel@tonic-gate 	 * lwp will suspend itself after reaching system call exit.
6827c478bd9Sstevel@tonic-gate 	 */
6837c478bd9Sstevel@tonic-gate 	init_mstate(t, LMS_STOPPED);
6847c478bd9Sstevel@tonic-gate 	t->t_proc_flag |= TP_HOLDLWP;
6857c478bd9Sstevel@tonic-gate 	t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE));
6867c478bd9Sstevel@tonic-gate 	t->t_whystop = PR_SUSPENDED;
6877c478bd9Sstevel@tonic-gate 	t->t_whatstop = SUSPEND_NORMAL;
6887c478bd9Sstevel@tonic-gate 	t->t_sig_check = 1;	/* ensure that TP_HOLDLWP is honored */
6907c478bd9Sstevel@tonic-gate 	/*
6917c478bd9Sstevel@tonic-gate 	 * Set system call processing flags in case tracing or profiling
6927c478bd9Sstevel@tonic-gate 	 * is set.  The first system call will evaluate these and turn
6937c478bd9Sstevel@tonic-gate 	 * them off if they aren't needed.
6947c478bd9Sstevel@tonic-gate 	 */
6957c478bd9Sstevel@tonic-gate 	t->t_pre_sys = 1;
6967c478bd9Sstevel@tonic-gate 	t->t_post_sys = 1;
6987c478bd9Sstevel@tonic-gate 	/*
6997c478bd9Sstevel@tonic-gate 	 * Insert the new thread into the list of all threads.
7007c478bd9Sstevel@tonic-gate 	 */
7017c478bd9Sstevel@tonic-gate 	if ((tx = p->p_tlist) == NULL) {
7027c478bd9Sstevel@tonic-gate 		t->t_back = t;
7037c478bd9Sstevel@tonic-gate 		t->t_forw = t;
7047c478bd9Sstevel@tonic-gate 		p->p_tlist = t;
7057c478bd9Sstevel@tonic-gate 	} else {
7067c478bd9Sstevel@tonic-gate 		t->t_forw = tx;
7077c478bd9Sstevel@tonic-gate 		t->t_back = tx->t_back;
7087c478bd9Sstevel@tonic-gate 		tx->t_back->t_forw = t;
7097c478bd9Sstevel@tonic-gate 		tx->t_back = t;
7107c478bd9Sstevel@tonic-gate 	}
7127c478bd9Sstevel@tonic-gate 	/*
7137c478bd9Sstevel@tonic-gate 	 * Insert the new lwp into an lwp directory slot position
7147c478bd9Sstevel@tonic-gate 	 * and into the lwpid hash table.
7157c478bd9Sstevel@tonic-gate 	 */
7167c478bd9Sstevel@tonic-gate 	lep->le_thread = t;
7177c478bd9Sstevel@tonic-gate 	lep->le_lwpid = t->t_tid;
7187c478bd9Sstevel@tonic-gate 	lep->le_start = t->t_start;
7196eb30ec3SRoger A. Faulkner 	lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1);
721*088d69f8SJerry Jelinek 	lwp_fp_init(lwp);
722*088d69f8SJerry Jelinek 
7237c478bd9Sstevel@tonic-gate 	if (state == TS_RUN) {
7247c478bd9Sstevel@tonic-gate 		/*
7257c478bd9Sstevel@tonic-gate 		 * We set the new lwp running immediately.
7267c478bd9Sstevel@tonic-gate 		 */
7277c478bd9Sstevel@tonic-gate 		t->t_proc_flag &= ~TP_HOLDLWP;
7287c478bd9Sstevel@tonic-gate 		lwp_create_done(t);
7297c478bd9Sstevel@tonic-gate 	}
7317c478bd9Sstevel@tonic-gate error:
7327c478bd9Sstevel@tonic-gate 	if (err) {
73335a5a358SJonathan Adams 		if (CLASS_KERNEL(cid)) {
73435a5a358SJonathan Adams 			/*
73535a5a358SJonathan Adams 			 * This should only happen if a system process runs
73635a5a358SJonathan Adams 			 * out of lwpids, which shouldn't occur.
73735a5a358SJonathan Adams 			 */
73835a5a358SJonathan Adams 			panic("Failed to create a system LWP");
73935a5a358SJonathan Adams 		}
7407c478bd9Sstevel@tonic-gate 		/*
7417c478bd9Sstevel@tonic-gate 		 * We have failed to create an lwp, so decrement the number
7427c478bd9Sstevel@tonic-gate 		 * of lwps in the task and let the lgroup load averages know
7437c478bd9Sstevel@tonic-gate 		 * that this thread isn't going to show up.
7447c478bd9Sstevel@tonic-gate 		 */
7457c478bd9Sstevel@tonic-gate 		kpreempt_disable();
7467c478bd9Sstevel@tonic-gate 		lgrp_move_thread(t, NULL, 1);
7477c478bd9Sstevel@tonic-gate 		kpreempt_enable();
7497c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&p->p_lock));
7507c478bd9Sstevel@tonic-gate 		mutex_enter(&p->p_zone->zone_nlwps_lock);
7517c478bd9Sstevel@tonic-gate 		p->p_task->tk_nlwps--;
7527c478bd9Sstevel@tonic-gate 		p->p_task->tk_proj->kpj_nlwps--;
7537c478bd9Sstevel@tonic-gate 		p->p_zone->zone_nlwps--;
7547c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_zone->zone_nlwps_lock);
7557c478bd9Sstevel@tonic-gate 		if (cid != NOCLASS && bufp != NULL)
7567c478bd9Sstevel@tonic-gate 			CL_FREE(cid, bufp);
7589acbbeafSnn 		if (branded)
7599acbbeafSnn 			BROP(p)->b_freelwp(lwp);
7617c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
7627c478bd9Sstevel@tonic-gate 		t->t_state = TS_FREE;
7637c478bd9Sstevel@tonic-gate 		thread_rele(t);
7657c478bd9Sstevel@tonic-gate 		/*
7667c478bd9Sstevel@tonic-gate 		 * We need to remove t from the list of all threads
7677c478bd9Sstevel@tonic-gate 		 * because thread_exit()/lwp_exit() isn't called on t.
7687c478bd9Sstevel@tonic-gate 		 */
7697c478bd9Sstevel@tonic-gate 		mutex_enter(&pidlock);
7707c478bd9Sstevel@tonic-gate 		ASSERT(t != t->t_next);		/* t0 never exits */
7717c478bd9Sstevel@tonic-gate 		t->t_next->t_prev = t->t_prev;
7727c478bd9Sstevel@tonic-gate 		t->t_prev->t_next = t->t_next;
7737c478bd9Sstevel@tonic-gate 		mutex_exit(&pidlock);
7757c478bd9Sstevel@tonic-gate 		thread_free(t);
7767c478bd9Sstevel@tonic-gate 		kmem_free(lep, sizeof (*lep));
7777c478bd9Sstevel@tonic-gate 		lwp = NULL;
7787c478bd9Sstevel@tonic-gate 	} else {
7797c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
7807c478bd9Sstevel@tonic-gate 	}
7826eb30ec3SRoger A. Faulkner 	if (old_dir != NULL)
7837c478bd9Sstevel@tonic-gate 		kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
7846eb30ec3SRoger A. Faulkner 	if (old_hash != NULL)
7857c478bd9Sstevel@tonic-gate 		kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
7866eb30ec3SRoger A. Faulkner 	if (ret_tidhash != NULL)
7876eb30ec3SRoger A. Faulkner 		kmem_free(ret_tidhash, sizeof (ret_tidhash_t));
7897c478bd9Sstevel@tonic-gate 	DTRACE_PROC1(lwp__create, kthread_t *, t);
7907c478bd9Sstevel@tonic-gate 	return (lwp);
7917c478bd9Sstevel@tonic-gate }
7937c478bd9Sstevel@tonic-gate /*
7947c478bd9Sstevel@tonic-gate  * lwp_create_done() is called by the caller of lwp_create() to set the
7957c478bd9Sstevel@tonic-gate  * newly-created lwp running after the caller has finished manipulating it.
7967c478bd9Sstevel@tonic-gate  */
void
lwp_create_done(kthread_t *t)
{
	proc_t *p = ttoproc(t);

	/* Caller must still hold the target process' p_lock. */
	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
	 * (The absence of the TS_CREATE flag prevents the lwp from running
	 * until we are finished with it, even if lwp_continue() is called on
	 * it by some other lwp in the process or elsewhere in the kernel.)
	 */
	thread_lock(t);
	/* lwp_create() left the thread stopped with TS_CREATE clear. */
	ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
	/*
	 * If TS_CSTART is set, lwp_continue(t) has been called and
	 * has already incremented p_lwprcnt; avoid doing this twice.
	 */
	if (!(t->t_schedflag & TS_CSTART))
		p->p_lwprcnt++;
	t->t_schedflag |= (TS_CSTART | TS_CREATE);
	setrun_locked(t);
	thread_unlock(t);
}
8237c478bd9Sstevel@tonic-gate /*
8247c478bd9Sstevel@tonic-gate  * Copy an LWP's active templates, and clear the latest contracts.
8257c478bd9Sstevel@tonic-gate  */
8267c478bd9Sstevel@tonic-gate void
lwp_ctmpl_copy(klwp_t * dst,klwp_t * src)8277c478bd9Sstevel@tonic-gate lwp_ctmpl_copy(klwp_t *dst, klwp_t *src)
8287c478bd9Sstevel@tonic-gate {
8297c478bd9Sstevel@tonic-gate 	int i;
8317c478bd9Sstevel@tonic-gate 	for (i = 0; i < ct_ntypes; i++) {
8327c478bd9Sstevel@tonic-gate 		dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]);
8337c478bd9Sstevel@tonic-gate 		dst->lwp_ct_latest[i] = NULL;
8347c478bd9Sstevel@tonic-gate 	}
8357c478bd9Sstevel@tonic-gate }
8377c478bd9Sstevel@tonic-gate /*
8387c478bd9Sstevel@tonic-gate  * Clear an LWP's contract template state.
8397c478bd9Sstevel@tonic-gate  */
8407c478bd9Sstevel@tonic-gate void
lwp_ctmpl_clear(klwp_t * lwp)8417c478bd9Sstevel@tonic-gate lwp_ctmpl_clear(klwp_t *lwp)
8427c478bd9Sstevel@tonic-gate {
8437c478bd9Sstevel@tonic-gate 	ct_template_t *tmpl;
8447c478bd9Sstevel@tonic-gate 	int i;
8467c478bd9Sstevel@tonic-gate 	for (i = 0; i < ct_ntypes; i++) {
8477c478bd9Sstevel@tonic-gate 		if ((tmpl = lwp->lwp_ct_active[i]) != NULL) {
8487c478bd9Sstevel@tonic-gate 			ctmpl_free(tmpl);
8497c478bd9Sstevel@tonic-gate 			lwp->lwp_ct_active[i] = NULL;
8507c478bd9Sstevel@tonic-gate 		}
8527c478bd9Sstevel@tonic-gate 		if (lwp->lwp_ct_latest[i] != NULL) {
8537c478bd9Sstevel@tonic-gate 			contract_rele(lwp->lwp_ct_latest[i]);
8547c478bd9Sstevel@tonic-gate 			lwp->lwp_ct_latest[i] = NULL;
8557c478bd9Sstevel@tonic-gate 		}
8567c478bd9Sstevel@tonic-gate 	}
8577c478bd9Sstevel@tonic-gate }
8597c478bd9Sstevel@tonic-gate /*
8607c478bd9Sstevel@tonic-gate  * Individual lwp exit.
8617c478bd9Sstevel@tonic-gate  * If this is the last lwp, exit the whole process.
8627c478bd9Sstevel@tonic-gate  */
8637c478bd9Sstevel@tonic-gate void
lwp_exit(void)8647c478bd9Sstevel@tonic-gate lwp_exit(void)
8657c478bd9Sstevel@tonic-gate {
8667c478bd9Sstevel@tonic-gate 	kthread_t *t = curthread;
8677c478bd9Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(t);
8687c478bd9Sstevel@tonic-gate 	proc_t *p = ttoproc(t);
8707c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&p->p_lock));
8727c478bd9Sstevel@tonic-gate 	mutex_exit(&p->p_lock);
8747c478bd9Sstevel@tonic-gate #if defined(__sparc)
8757c478bd9Sstevel@tonic-gate 	/*
8767c478bd9Sstevel@tonic-gate 	 * Ensure that the user stack is fully abandoned..
8777c478bd9Sstevel@tonic-gate 	 */
8787c478bd9Sstevel@tonic-gate 	trash_user_windows();
8797c478bd9Sstevel@tonic-gate #endif
8817c478bd9Sstevel@tonic-gate 	tsd_exit();			/* free thread specific data */
8837c478bd9Sstevel@tonic-gate 	kcpc_passivate();		/* Clean up performance counter state */
8857c478bd9Sstevel@tonic-gate 	pollcleanup();
8877c478bd9Sstevel@tonic-gate 	if (t->t_door)
8887c478bd9Sstevel@tonic-gate 		door_slam();
8907c478bd9Sstevel@tonic-gate 	if (t->t_schedctl != NULL)
8917c478bd9Sstevel@tonic-gate 		schedctl_lwp_cleanup(t);
8937c478bd9Sstevel@tonic-gate 	if (t->t_upimutex != NULL)
8947c478bd9Sstevel@tonic-gate 		upimutex_cleanup();
8969acbbeafSnn 	/*
8979acbbeafSnn 	 * Perform any brand specific exit processing, then release any
8989acbbeafSnn 	 * brand data associated with the lwp
8999acbbeafSnn 	 */
9009acbbeafSnn 	if (PROC_IS_BRANDED(p))
9019acbbeafSnn 		BROP(p)->b_lwpexit(lwp);
9031bc02a70SJakub Jermar 	lwp_pcb_exit();
9041bc02a70SJakub Jermar 
9057c478bd9Sstevel@tonic-gate 	mutex_enter(&p->p_lock);
9067c478bd9Sstevel@tonic-gate 	lwp_cleanup();
9087c478bd9Sstevel@tonic-gate 	/*
9097c478bd9Sstevel@tonic-gate 	 * When this process is dumping core, its lwps are held here
9107c478bd9Sstevel@tonic-gate 	 * until the core dump is finished. Then exitlwps() is called
9117c478bd9Sstevel@tonic-gate 	 * again to release these lwps so that they can finish exiting.
9127c478bd9Sstevel@tonic-gate 	 */
9137c478bd9Sstevel@tonic-gate 	if (p->p_flag & SCOREDUMP)
9147c478bd9Sstevel@tonic-gate 		stop(PR_SUSPENDED, SUSPEND_NORMAL);
9161bc02a70SJakub Jermar 	/*
9171bc02a70SJakub Jermar 	 * Block the process against /proc now that we have really acquired
9181bc02a70SJakub Jermar 	 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
9191bc02a70SJakub Jermar 	 */
9201bc02a70SJakub Jermar 	prbarrier(p);
9211bc02a70SJakub Jermar 
9227c478bd9Sstevel@tonic-gate 	/*
9237c478bd9Sstevel@tonic-gate 	 * Call proc_exit() if this is the last non-daemon lwp in the process.
9247c478bd9Sstevel@tonic-gate 	 */
9257c478bd9Sstevel@tonic-gate 	if (!(t->t_proc_flag & TP_DAEMON) &&
9267c478bd9Sstevel@tonic-gate 	    p->p_lwpcnt == p->p_lwpdaemon + 1) {
9277c478bd9Sstevel@tonic-gate 		mutex_exit(&p->p_lock);
92897eda132Sraf 		if (proc_exit(CLD_EXITED, 0) == 0) {
9297c478bd9Sstevel@tonic-gate 			/* Restarting init. */
9307c478bd9Sstevel@tonic-gate 			return;
9317c478bd9Sstevel@tonic-gate 		}
9337c478bd9Sstevel@tonic-gate 		/*
9347c478bd9Sstevel@tonic-gate 		 * proc_exit() returns a non-zero value when some other
9357c478bd9Sstevel@tonic-gate 		 * lwp got there first.  We just have to continue in
9367c478bd9Sstevel@tonic-gate 		 * lwp_exit().
9377c478bd9Sstevel@tonic-gate 		 */
9387c478bd9Sstevel@tonic-gate 		mutex_enter(&p->p_lock);
9397c478bd9Sstevel@tonic-gate 		ASSERT(curproc->p_flag & SEXITLWPS);
9401bc02a70SJakub Jermar 		prbarrier(p);
9417c478bd9Sstevel@tonic-gate 	}
9437c478bd9Sstevel@tonic-gate 	DTRACE_PROC(lwp__exit);
9457c478bd9Sstevel@tonic-gate 	/*
9467c478bd9Sstevel@tonic-gate 	 * If the lwp is a detached lwp or if the process is exiting,
9477c478bd9Sstevel@tonic-gate 	 * remove (lwp_hash_out()) the lwp from the lwp directory.
9487c478bd9Sstevel@tonic-gate 	 * Otherwise null out the lwp's le_thread pointer in the lwp
9497c478bd9Sstevel@tonic-gate 	 * directory so that other threads will see it as a zombie lwp.
9507c478bd9Sstevel@tonic-gate 	 */
9517c478bd9Sstevel@tonic-gate 	prlwpexit(t);		/* notify /proc */
9527c478bd9Sstevel@tonic-gate 	if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
9537c478bd9Sstevel@tonic-gate 		lwp_hash_out(p, t->t_tid);
9547c478bd9Sstevel@tonic-gate 	else {
9557c478bd9Sstevel@tonic-gate 		ASSERT(!(t->t_proc_flag & TP_DAEMON));
9567c478bd9Sstevel@tonic-gate 		p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
9577c478bd9Sstevel@tonic-gate 		p->p_zombcnt++;
9587c478bd9Sstevel@tonic-gate 		cv_broadcast(&p->p_lwpexit);
9597c478bd9Sstevel@tonic-gate 	}
9607c478bd9Sstevel@tonic-gate 	if (t->t_proc_flag & TP_DAEMON) {
9617c478bd9Sstevel@tonic-gate 		p->p_lwpdaemon--;
9627c478bd9Sstevel@tonic-gate 		t->t_proc_flag &= ~TP_DAEMON;
9637c478bd9Sstevel@tonic-gate 	}
9647c478bd9Sstevel@tonic-gate 	t->t_proc_flag &= ~TP_TWAIT;
9667c478bd9Sstevel@tonic-gate 	/*
9677c478bd9Sstevel@tonic-gate 	 * Maintain accurate lwp count for task.max-lwps resource control.
9687c478bd9Sstevel@tonic-gate 	 */
9697c478bd9Sstevel@tonic-gate 	mutex_enter(&p->p_zone->zone_nlwps_lock);
9707c478bd9Sstevel@tonic-gate 	p->p_task->tk_nlwps--;
9717c478bd9Sstevel@tonic-gate 	p->p_task->tk_proj->kpj_nlwps--;
9727c478bd9Sstevel@tonic-gate 	p->p_zone->zone_nlwps--;
9737c478bd9Sstevel@tonic-gate 	mutex_exit(&p->p_zone->zone_nlwps_lock);
9757c478bd9Sstevel@tonic-gate 	CL_EXIT(t);		/* tell the scheduler that t is exiting */
9767c478bd9Sstevel@tonic-gate 	ASSERT(p->p_lwpcnt != 0);
9777c478bd9Sstevel@tonic-gate 	p->p_lwpcnt--;
9797c478bd9Sstevel@tonic-gate 	/*
9807c478bd9Sstevel@tonic-gate 	 * If all remaining non-daemon lwps are waiting in lwp_wait(),
9817c478bd9Sstevel@tonic-gate 	 * wake them up so someone can return EDEADLK.
9827c478bd9Sstevel@tonic-gate 	 * (See the block comment preceeding lwp_wait().)
9837c478bd9Sstevel@tonic-gate 	 */
9847c478bd9Sstevel@tonic-gate 	if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
9857c478bd9Sstevel@tonic-gate 		cv_broadcast(&p->p_lwpexit);
9877c478bd9Sstevel@tonic-gate 	t->t_proc_flag |= TP_LWPEXIT;
9887c478bd9Sstevel@tonic-gate 	term_mstate(t);
9907c478bd9Sstevel@tonic-gate #ifndef NPROBE
9917c478bd9Sstevel@tonic-gate 	/* Kernel probe */
9927c478bd9Sstevel@tonic-gate 	if (t->t_tnf_tpdp)
9937c478bd9Sstevel@tonic-gate 		tnf_thread_exit();
9947c478bd9Sstevel@tonic-gate #endif /* NPROBE */
9967c478bd9Sstevel@tonic-gate 	t->t_forw->t_back = t->t_back;
9977c478bd9Sstevel@tonic-gate 	t->t_back->t_forw = t->t_forw;
9987c478bd9Sstevel@tonic-gate 	if (t == p->p_tlist)
9997c478bd9Sstevel@tonic-gate 		p->p_tlist = t->t_forw;
10017c478bd9Sstevel@tonic-gate 	/*
10027c478bd9Sstevel@tonic-gate 	 * Clean up the signal state.
10037c478bd9Sstevel@tonic-gate 	 */
10047c478bd9Sstevel@tonic-gate 	if (t->t_sigqueue != NULL)
10057c478bd9Sstevel@tonic-gate 		sigdelq(p, t, 0);
10067c478bd9Sstevel@tonic-gate 	if (lwp->lwp_curinfo != NULL) {
10077c478bd9Sstevel@tonic-gate 		siginfofree(lwp->lwp_curinfo);
10087c478bd9Sstevel@tonic-gate 		lwp->lwp_curinfo = NULL;
10097c478bd9Sstevel@tonic-gate 	}
1011f971a346SBryan Cantrill 	/*
1012f971a346SBryan Cantrill 	 * If we have spymaster information (that is, if we're an agent LWP),
1013f971a346SBryan Cantrill 	 * free that now.
1014f971a346SBryan Cantrill 	 */
1015f971a346SBryan Cantrill 	if (lwp->lwp_spymaster != NULL) {
1016f971a346SBryan Cantrill 		kmem_free(lwp->lwp_spymaster, sizeof (psinfo_t));
1017f971a346SBryan Cantrill 		lwp->lwp_spymaster = NULL;
1018f971a346SBryan Cantrill 	}
1019f971a346SBryan Cantrill 
10207c478bd9Sstevel@tonic-gate 	thread_rele(t);
10227c478bd9Sstevel@tonic-gate 	/*
10237c478bd9Sstevel@tonic-gate 	 * Terminated lwps are associated with process zero and are put onto
10247c478bd9Sstevel@tonic-gate 	 * death-row by resume().  Avoid preemption after resetting t->t_procp.
10257c478bd9Sstevel@tonic-gate 	 */
10267c478bd9Sstevel@tonic-gate 	t->t_preempt++;
10280baeff3dSrab 	if (t->t_ctx != NULL)
10290baeff3dSrab 		exitctx(t);
10300baeff3dSrab 	if (p->p_pctx != NULL)
10310baeff3dSrab 		exitpctx(p);
10337c478bd9Sstevel@tonic-gate 	t->t_procp = &p0;
10357c478bd9Sstevel@tonic-gate 	/*
10367c478bd9Sstevel@tonic-gate 	 * Notify the HAT about the change of address space
10377c478bd9Sstevel@tonic-gate 	 */
10387c478bd9Sstevel@tonic-gate 	hat_thread_exit(t);
10397c478bd9Sstevel@tonic-gate 	/*
10407c478bd9Sstevel@tonic-gate 	 * When this is the last running lwp in this process and some lwp is
10417c478bd9Sstevel@tonic-gate 	 * waiting for this condition to become true, or this thread was being
10427c478bd9Sstevel@tonic-gate 	 * suspended, then the waiting lwp is awakened.
10437c478bd9Sstevel@tonic-gate 	 *
10447c478bd9Sstevel@tonic-gate 	 * Also, if the process is exiting, we may have a thread waiting in
10457c478bd9Sstevel@tonic-gate 	 * exitlwps() that needs to be notified.
10467c478bd9Sstevel@tonic-gate 	 */
10477c478bd9Sstevel@tonic-gate 	if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
10487c478bd9Sstevel@tonic-gate 	    (p->p_flag & SEXITLWPS))
10497c478bd9Sstevel@tonic-gate 		cv_broadcast(&p->p_holdlwps);
10517c478bd9Sstevel@tonic-gate 	/*
10527c478bd9Sstevel@tonic-gate 	 * Need to drop p_lock so we can reacquire pidlock.
10537c478bd9Sstevel@tonic-gate 	 */
10547c478bd9Sstevel@tonic-gate 	mutex_exit(&p->p_lock);
10557c478bd9Sstevel@tonic-gate 	mutex_enter(&pidlock);
10577c478bd9Sstevel@tonic-gate 	ASSERT(t != t->t_next);		/* t0 never exits */
10587c478bd9Sstevel@tonic-gate 	t->t_next->t_prev =