17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 59acbbeafSnn * Common Development and Distribution License (the "License"). 69acbbeafSnn * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 2197eda132Sraf 227c478bd9Sstevel@tonic-gate /* 236eb30ec3SRoger A. Faulkner * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 247c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
257c478bd9Sstevel@tonic-gate */ 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate #include <sys/param.h> 287c478bd9Sstevel@tonic-gate #include <sys/types.h> 297c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 307c478bd9Sstevel@tonic-gate #include <sys/systm.h> 317c478bd9Sstevel@tonic-gate #include <sys/thread.h> 327c478bd9Sstevel@tonic-gate #include <sys/proc.h> 337c478bd9Sstevel@tonic-gate #include <sys/task.h> 347c478bd9Sstevel@tonic-gate #include <sys/project.h> 357c478bd9Sstevel@tonic-gate #include <sys/signal.h> 367c478bd9Sstevel@tonic-gate #include <sys/errno.h> 377c478bd9Sstevel@tonic-gate #include <sys/vmparam.h> 387c478bd9Sstevel@tonic-gate #include <sys/stack.h> 397c478bd9Sstevel@tonic-gate #include <sys/procfs.h> 407c478bd9Sstevel@tonic-gate #include <sys/prsystm.h> 417c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 427c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 437c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 447c478bd9Sstevel@tonic-gate #include <sys/door.h> 457c478bd9Sstevel@tonic-gate #include <vm/seg_kp.h> 467c478bd9Sstevel@tonic-gate #include <sys/debug.h> 477c478bd9Sstevel@tonic-gate #include <sys/tnf.h> 487c478bd9Sstevel@tonic-gate #include <sys/schedctl.h> 497c478bd9Sstevel@tonic-gate #include <sys/poll.h> 507c478bd9Sstevel@tonic-gate #include <sys/copyops.h> 517c478bd9Sstevel@tonic-gate #include <sys/lwp_upimutex_impl.h> 527c478bd9Sstevel@tonic-gate #include <sys/cpupart.h> 537c478bd9Sstevel@tonic-gate #include <sys/lgrp.h> 547c478bd9Sstevel@tonic-gate #include <sys/rctl.h> 557c478bd9Sstevel@tonic-gate #include <sys/contract_impl.h> 567c478bd9Sstevel@tonic-gate #include <sys/cpc_impl.h> 577c478bd9Sstevel@tonic-gate #include <sys/sdt.h> 587c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 599acbbeafSnn #include <sys/brand.h> 60e0cf54a5SRoger A. Faulkner #include <sys/cyclic.h> 61936e3a33SGangadhar Mylapuram #include <sys/pool.h> 627c478bd9Sstevel@tonic-gate 636eb30ec3SRoger A. 
Faulkner /* hash function for the lwpid hash table, p->p_tidhash[] */ 646eb30ec3SRoger A. Faulkner #define TIDHASH(tid, hash_sz) ((tid) & ((hash_sz) - 1)) 656eb30ec3SRoger A. Faulkner 667c478bd9Sstevel@tonic-gate void *segkp_lwp; /* cookie for pool of segkp resources */ 67575a7426Spt extern void reapq_move_lq_to_tq(kthread_t *); 68575a7426Spt extern void freectx_ctx(struct ctxop *); 697c478bd9Sstevel@tonic-gate 70*35a5a358SJonathan Adams /* 71*35a5a358SJonathan Adams * Create a kernel thread associated with a particular system process. Give 72*35a5a358SJonathan Adams * it an LWP so that microstate accounting will be available for it. 73*35a5a358SJonathan Adams */ 74*35a5a358SJonathan Adams kthread_t * 75*35a5a358SJonathan Adams lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri) 76*35a5a358SJonathan Adams { 77*35a5a358SJonathan Adams klwp_t *lwp; 78*35a5a358SJonathan Adams 79*35a5a358SJonathan Adams VERIFY((p->p_flag & SSYS) != 0); 80*35a5a358SJonathan Adams 81*35a5a358SJonathan Adams lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0); 82*35a5a358SJonathan Adams 83*35a5a358SJonathan Adams VERIFY(lwp != NULL); 84*35a5a358SJonathan Adams 85*35a5a358SJonathan Adams return (lwptot(lwp)); 86*35a5a358SJonathan Adams } 87*35a5a358SJonathan Adams 887c478bd9Sstevel@tonic-gate /* 897c478bd9Sstevel@tonic-gate * Create a thread that appears to be stopped at sys_rtt. 
907c478bd9Sstevel@tonic-gate */ 917c478bd9Sstevel@tonic-gate klwp_t * 927c478bd9Sstevel@tonic-gate lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p, 937c478bd9Sstevel@tonic-gate int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid) 947c478bd9Sstevel@tonic-gate { 957c478bd9Sstevel@tonic-gate klwp_t *lwp = NULL; 967c478bd9Sstevel@tonic-gate kthread_t *t; 977c478bd9Sstevel@tonic-gate kthread_t *tx; 987c478bd9Sstevel@tonic-gate cpupart_t *oldpart = NULL; 997c478bd9Sstevel@tonic-gate size_t stksize; 1007c478bd9Sstevel@tonic-gate caddr_t lwpdata = NULL; 1017c478bd9Sstevel@tonic-gate processorid_t binding; 1027c478bd9Sstevel@tonic-gate int err = 0; 1037c478bd9Sstevel@tonic-gate kproject_t *oldkpj, *newkpj; 1047c478bd9Sstevel@tonic-gate void *bufp = NULL; 105*35a5a358SJonathan Adams klwp_t *curlwp; 1067c478bd9Sstevel@tonic-gate lwpent_t *lep; 1077c478bd9Sstevel@tonic-gate lwpdir_t *old_dir = NULL; 1087c478bd9Sstevel@tonic-gate uint_t old_dirsz = 0; 1096eb30ec3SRoger A. Faulkner tidhash_t *old_hash = NULL; 1107c478bd9Sstevel@tonic-gate uint_t old_hashsz = 0; 1116eb30ec3SRoger A. Faulkner ret_tidhash_t *ret_tidhash = NULL; 1127c478bd9Sstevel@tonic-gate int i; 1137c478bd9Sstevel@tonic-gate int rctlfail = 0; 1149acbbeafSnn boolean_t branded = 0; 115575a7426Spt struct ctxop *ctx = NULL; 1167c478bd9Sstevel@tonic-gate 117*35a5a358SJonathan Adams ASSERT(cid != sysdccid); /* system threads must start in SYS */ 118*35a5a358SJonathan Adams 119*35a5a358SJonathan Adams ASSERT(p != &p0); /* No new LWPs in p0. 
*/ 120*35a5a358SJonathan Adams 1217c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 1227c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 1237c478bd9Sstevel@tonic-gate /* 1247c478bd9Sstevel@tonic-gate * don't enforce rctl limits on system processes 1257c478bd9Sstevel@tonic-gate */ 126*35a5a358SJonathan Adams if (!CLASS_KERNEL(cid)) { 1277c478bd9Sstevel@tonic-gate if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl) 1287c478bd9Sstevel@tonic-gate if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p, 1297c478bd9Sstevel@tonic-gate 1, 0) & RCT_DENY) 1307c478bd9Sstevel@tonic-gate rctlfail = 1; 1317c478bd9Sstevel@tonic-gate if (p->p_task->tk_proj->kpj_nlwps >= 1327c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps_ctl) 1337c478bd9Sstevel@tonic-gate if (rctl_test(rc_project_nlwps, 1347c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_rctls, p, 1, 0) 1357c478bd9Sstevel@tonic-gate & RCT_DENY) 1367c478bd9Sstevel@tonic-gate rctlfail = 1; 1377c478bd9Sstevel@tonic-gate if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl) 1387c478bd9Sstevel@tonic-gate if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p, 1397c478bd9Sstevel@tonic-gate 1, 0) & RCT_DENY) 1407c478bd9Sstevel@tonic-gate rctlfail = 1; 1417c478bd9Sstevel@tonic-gate } 1427c478bd9Sstevel@tonic-gate if (rctlfail) { 1437c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 1447c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 1457c478bd9Sstevel@tonic-gate return (NULL); 1467c478bd9Sstevel@tonic-gate } 1477c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps++; 1487c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps++; 1497c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps++; 1507c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 1517c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 1527c478bd9Sstevel@tonic-gate 153*35a5a358SJonathan Adams if (CLASS_KERNEL(cid)) { 154*35a5a358SJonathan Adams curlwp = NULL; /* don't inherit from curlwp */ 1557c478bd9Sstevel@tonic-gate stksize = 
lwp_default_stksize; 156*35a5a358SJonathan Adams } else { 157*35a5a358SJonathan Adams curlwp = ttolwp(curthread); 158*35a5a358SJonathan Adams if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0) 159*35a5a358SJonathan Adams stksize = lwp_default_stksize; 160*35a5a358SJonathan Adams } 1617c478bd9Sstevel@tonic-gate 1627c478bd9Sstevel@tonic-gate /* 163*35a5a358SJonathan Adams * For system threads, we sleep for our swap reservation, and the 164*35a5a358SJonathan Adams * thread stack can't be swapped. 165*35a5a358SJonathan Adams * 166*35a5a358SJonathan Adams * Otherwise, try to reclaim a <lwp,stack> from 'deathrow' 1677c478bd9Sstevel@tonic-gate */ 168*35a5a358SJonathan Adams if (CLASS_KERNEL(cid)) { 169*35a5a358SJonathan Adams lwpdata = (caddr_t)segkp_get(segkp, stksize, 170*35a5a358SJonathan Adams (KPD_NO_ANON | KPD_HASREDZONE | KPD_LOCKED)); 171*35a5a358SJonathan Adams 172*35a5a358SJonathan Adams } else if (stksize == lwp_default_stksize) { 1737c478bd9Sstevel@tonic-gate if (lwp_reapcnt > 0) { 1747c478bd9Sstevel@tonic-gate mutex_enter(&reaplock); 1757c478bd9Sstevel@tonic-gate if ((t = lwp_deathrow) != NULL) { 1767c478bd9Sstevel@tonic-gate ASSERT(t->t_swap); 1777c478bd9Sstevel@tonic-gate lwp_deathrow = t->t_forw; 1787c478bd9Sstevel@tonic-gate lwp_reapcnt--; 1797c478bd9Sstevel@tonic-gate lwpdata = t->t_swap; 1807c478bd9Sstevel@tonic-gate lwp = t->t_lwp; 181575a7426Spt ctx = t->t_ctx; 1827c478bd9Sstevel@tonic-gate t->t_swap = NULL; 1837c478bd9Sstevel@tonic-gate t->t_lwp = NULL; 184575a7426Spt t->t_ctx = NULL; 185575a7426Spt reapq_move_lq_to_tq(t); 186575a7426Spt } 187575a7426Spt mutex_exit(&reaplock); 188575a7426Spt if (lwp != NULL) { 189575a7426Spt lwp_stk_fini(lwp); 190575a7426Spt } 191575a7426Spt if (ctx != NULL) { 192575a7426Spt freectx_ctx(ctx); 1937c478bd9Sstevel@tonic-gate } 1947c478bd9Sstevel@tonic-gate } 1957c478bd9Sstevel@tonic-gate if (lwpdata == NULL && 1967c478bd9Sstevel@tonic-gate (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) { 
1977c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 1987c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 1997c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps--; 2007c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps--; 2017c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps--; 2027c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 2037c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 2047c478bd9Sstevel@tonic-gate return (NULL); 2057c478bd9Sstevel@tonic-gate } 2067c478bd9Sstevel@tonic-gate } else { 2077c478bd9Sstevel@tonic-gate stksize = roundup(stksize, PAGESIZE); 2087c478bd9Sstevel@tonic-gate if ((lwpdata = (caddr_t)segkp_get(segkp, stksize, 2097c478bd9Sstevel@tonic-gate (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) { 2107c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 2117c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 2127c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps--; 2137c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps--; 2147c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps--; 2157c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 2167c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 2177c478bd9Sstevel@tonic-gate return (NULL); 2187c478bd9Sstevel@tonic-gate } 2197c478bd9Sstevel@tonic-gate } 2207c478bd9Sstevel@tonic-gate 2217c478bd9Sstevel@tonic-gate /* 2227c478bd9Sstevel@tonic-gate * Create a thread, initializing the stack pointer 2237c478bd9Sstevel@tonic-gate */ 2247c478bd9Sstevel@tonic-gate t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri); 2257c478bd9Sstevel@tonic-gate 2267c478bd9Sstevel@tonic-gate t->t_swap = lwpdata; /* Start of page-able data */ 2277c478bd9Sstevel@tonic-gate if (lwp == NULL) 2287c478bd9Sstevel@tonic-gate lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP); 2297c478bd9Sstevel@tonic-gate bzero(lwp, sizeof (*lwp)); 2307c478bd9Sstevel@tonic-gate t->t_lwp = lwp; 2317c478bd9Sstevel@tonic-gate 2327c478bd9Sstevel@tonic-gate t->t_hold = *smask; 
2337c478bd9Sstevel@tonic-gate lwp->lwp_thread = t; 2347c478bd9Sstevel@tonic-gate lwp->lwp_procp = p; 2357c478bd9Sstevel@tonic-gate lwp->lwp_sigaltstack.ss_flags = SS_DISABLE; 2367c478bd9Sstevel@tonic-gate if (curlwp != NULL && curlwp->lwp_childstksz != 0) 2377c478bd9Sstevel@tonic-gate lwp->lwp_childstksz = curlwp->lwp_childstksz; 2387c478bd9Sstevel@tonic-gate 2397c478bd9Sstevel@tonic-gate t->t_stk = lwp_stk_init(lwp, t->t_stk); 2407c478bd9Sstevel@tonic-gate thread_load(t, proc, arg, len); 2417c478bd9Sstevel@tonic-gate 2427c478bd9Sstevel@tonic-gate /* 2437c478bd9Sstevel@tonic-gate * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect. 2447c478bd9Sstevel@tonic-gate */ 245e0cf54a5SRoger A. Faulkner if (p->p_rprof_cyclic != CYCLIC_NONE) 2467c478bd9Sstevel@tonic-gate t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP); 2477c478bd9Sstevel@tonic-gate 2487c478bd9Sstevel@tonic-gate if (cid != NOCLASS) 2497c478bd9Sstevel@tonic-gate (void) CL_ALLOC(&bufp, cid, KM_SLEEP); 2507c478bd9Sstevel@tonic-gate 2517c478bd9Sstevel@tonic-gate /* 2527c478bd9Sstevel@tonic-gate * Allocate an lwp directory entry for the new lwp. 2537c478bd9Sstevel@tonic-gate */ 2547c478bd9Sstevel@tonic-gate lep = kmem_zalloc(sizeof (*lep), KM_SLEEP); 2557c478bd9Sstevel@tonic-gate 2567c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 2577c478bd9Sstevel@tonic-gate grow: 2587c478bd9Sstevel@tonic-gate /* 2597c478bd9Sstevel@tonic-gate * Grow the lwp (thread) directory and lwpid hash table if necessary. 2607c478bd9Sstevel@tonic-gate * A note on the growth algorithm: 2617c478bd9Sstevel@tonic-gate * The new lwp directory size is computed as: 2627c478bd9Sstevel@tonic-gate * new = 2 * old + 2 2637c478bd9Sstevel@tonic-gate * Starting with an initial size of 2 (see exec_common()), 2647c478bd9Sstevel@tonic-gate * this yields numbers that are a power of two minus 2: 2657c478bd9Sstevel@tonic-gate * 2, 6, 14, 30, 62, 126, 254, 510, 1022, ... 
2667c478bd9Sstevel@tonic-gate * The size of the lwpid hash table must be a power of two 2677c478bd9Sstevel@tonic-gate * and must be commensurate in size with the lwp directory 2687c478bd9Sstevel@tonic-gate * so that hash bucket chains remain short. Therefore, 2697c478bd9Sstevel@tonic-gate * the lwpid hash table size is computed as: 2707c478bd9Sstevel@tonic-gate * hashsz = (dirsz + 2) / 2 2717c478bd9Sstevel@tonic-gate * which leads to these hash table sizes corresponding to 2727c478bd9Sstevel@tonic-gate * the above directory sizes: 2737c478bd9Sstevel@tonic-gate * 2, 4, 8, 16, 32, 64, 128, 256, 512, ... 2746eb30ec3SRoger A. Faulkner * A note on growing the hash table: 2756eb30ec3SRoger A. Faulkner * For performance reasons, code in lwp_unpark() does not 2766eb30ec3SRoger A. Faulkner * acquire curproc->p_lock when searching the hash table. 2776eb30ec3SRoger A. Faulkner * Rather, it calls lwp_hash_lookup_and_lock() which 2786eb30ec3SRoger A. Faulkner * acquires only the individual hash bucket lock, taking 2796eb30ec3SRoger A. Faulkner * care to deal with reallocation of the hash table 2806eb30ec3SRoger A. Faulkner * during the time it takes to acquire the lock. 2816eb30ec3SRoger A. Faulkner * 2826eb30ec3SRoger A. Faulkner * This is sufficient to protect the integrity of the 2836eb30ec3SRoger A. Faulkner * hash table, but it requires us to acquire all of the 2846eb30ec3SRoger A. Faulkner * old hash bucket locks before growing the hash table 2856eb30ec3SRoger A. Faulkner * and to release them afterwards. It also requires us 2866eb30ec3SRoger A. Faulkner * not to free the old hash table because some thread 2876eb30ec3SRoger A. Faulkner * in lwp_hash_lookup_and_lock() might still be trying 2886eb30ec3SRoger A. Faulkner * to acquire the old bucket lock. 2896eb30ec3SRoger A. Faulkner * 2906eb30ec3SRoger A. Faulkner * So we adopt the tactic of keeping all of the retired 2916eb30ec3SRoger A. Faulkner * hash tables on a linked list, so they can be safely 2926eb30ec3SRoger A. 
Faulkner * freed when the process exits or execs. 2936eb30ec3SRoger A. Faulkner * 2946eb30ec3SRoger A. Faulkner * Because the hash table grows in powers of two, the 2956eb30ec3SRoger A. Faulkner * total size of all of the hash tables will be slightly 2966eb30ec3SRoger A. Faulkner * less than twice the size of the largest hash table. 2977c478bd9Sstevel@tonic-gate */ 2987c478bd9Sstevel@tonic-gate while (p->p_lwpfree == NULL) { 2997c478bd9Sstevel@tonic-gate uint_t dirsz = p->p_lwpdir_sz; 3007c478bd9Sstevel@tonic-gate lwpdir_t *new_dir; 3016eb30ec3SRoger A. Faulkner uint_t new_dirsz; 3027c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 3036eb30ec3SRoger A. Faulkner tidhash_t *new_hash; 3046eb30ec3SRoger A. Faulkner uint_t new_hashsz; 3057c478bd9Sstevel@tonic-gate 3067c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 3077c478bd9Sstevel@tonic-gate 3086eb30ec3SRoger A. Faulkner /* 3096eb30ec3SRoger A. Faulkner * Prepare to remember the old p_tidhash for later 3106eb30ec3SRoger A. Faulkner * kmem_free()ing when the process exits or execs. 3116eb30ec3SRoger A. Faulkner */ 3126eb30ec3SRoger A. Faulkner if (ret_tidhash == NULL) 3136eb30ec3SRoger A. Faulkner ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t), 3146eb30ec3SRoger A. Faulkner KM_SLEEP); 3156eb30ec3SRoger A. Faulkner if (old_dir != NULL) 3167c478bd9Sstevel@tonic-gate kmem_free(old_dir, old_dirsz * sizeof (*old_dir)); 3176eb30ec3SRoger A. Faulkner if (old_hash != NULL) 3187c478bd9Sstevel@tonic-gate kmem_free(old_hash, old_hashsz * sizeof (*old_hash)); 3196eb30ec3SRoger A. Faulkner 3207c478bd9Sstevel@tonic-gate new_dirsz = 2 * dirsz + 2; 3217c478bd9Sstevel@tonic-gate new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP); 3227c478bd9Sstevel@tonic-gate for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++) 3237c478bd9Sstevel@tonic-gate ldp->ld_next = ldp + 1; 3247c478bd9Sstevel@tonic-gate new_hashsz = (new_dirsz + 2) / 2; 3256eb30ec3SRoger A. 
Faulkner new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t), 326575a7426Spt KM_SLEEP); 3277c478bd9Sstevel@tonic-gate 3287c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 3297c478bd9Sstevel@tonic-gate if (p == curproc) 3307c478bd9Sstevel@tonic-gate prbarrier(p); 3317c478bd9Sstevel@tonic-gate 3327c478bd9Sstevel@tonic-gate if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) { 3337c478bd9Sstevel@tonic-gate /* 3347c478bd9Sstevel@tonic-gate * Someone else beat us to it or some lwp exited. 3357c478bd9Sstevel@tonic-gate * Set up to free our memory and take a lap. 3367c478bd9Sstevel@tonic-gate */ 3377c478bd9Sstevel@tonic-gate old_dir = new_dir; 3387c478bd9Sstevel@tonic-gate old_dirsz = new_dirsz; 3397c478bd9Sstevel@tonic-gate old_hash = new_hash; 3407c478bd9Sstevel@tonic-gate old_hashsz = new_hashsz; 3417c478bd9Sstevel@tonic-gate } else { 3426eb30ec3SRoger A. Faulkner /* 3436eb30ec3SRoger A. Faulkner * For the benefit of lwp_hash_lookup_and_lock(), 3446eb30ec3SRoger A. Faulkner * called from lwp_unpark(), which searches the 3456eb30ec3SRoger A. Faulkner * tid hash table without acquiring p->p_lock, 3466eb30ec3SRoger A. Faulkner * we must acquire all of the tid hash table 3476eb30ec3SRoger A. Faulkner * locks before replacing p->p_tidhash. 3486eb30ec3SRoger A. Faulkner */ 3497c478bd9Sstevel@tonic-gate old_hash = p->p_tidhash; 3507c478bd9Sstevel@tonic-gate old_hashsz = p->p_tidhash_sz; 3516eb30ec3SRoger A. Faulkner for (i = 0; i < old_hashsz; i++) { 3526eb30ec3SRoger A. Faulkner mutex_enter(&old_hash[i].th_lock); 3536eb30ec3SRoger A. Faulkner mutex_enter(&new_hash[i].th_lock); 3546eb30ec3SRoger A. Faulkner } 3556eb30ec3SRoger A. Faulkner 3567c478bd9Sstevel@tonic-gate /* 3577c478bd9Sstevel@tonic-gate * We simply hash in all of the old directory entries. 3587c478bd9Sstevel@tonic-gate * This works because the old directory has no empty 3597c478bd9Sstevel@tonic-gate * slots and the new hash table starts out empty. 
3607c478bd9Sstevel@tonic-gate * This reproduces the original directory ordering 3617c478bd9Sstevel@tonic-gate * (required for /proc directory semantics). 3627c478bd9Sstevel@tonic-gate */ 3636eb30ec3SRoger A. Faulkner old_dir = p->p_lwpdir; 3646eb30ec3SRoger A. Faulkner old_dirsz = p->p_lwpdir_sz; 3656eb30ec3SRoger A. Faulkner p->p_lwpdir = new_dir; 3666eb30ec3SRoger A. Faulkner p->p_lwpfree = new_dir; 3676eb30ec3SRoger A. Faulkner p->p_lwpdir_sz = new_dirsz; 3686eb30ec3SRoger A. Faulkner for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++) 3696eb30ec3SRoger A. Faulkner lwp_hash_in(p, ldp->ld_entry, 3706eb30ec3SRoger A. Faulkner new_hash, new_hashsz, 0); 3716eb30ec3SRoger A. Faulkner 3726eb30ec3SRoger A. Faulkner /* 3736eb30ec3SRoger A. Faulkner * Remember the old hash table along with all 3746eb30ec3SRoger A. Faulkner * of the previously-remembered hash tables. 3756eb30ec3SRoger A. Faulkner * We will free them at process exit or exec. 3766eb30ec3SRoger A. Faulkner */ 3776eb30ec3SRoger A. Faulkner ret_tidhash->rth_tidhash = old_hash; 3786eb30ec3SRoger A. Faulkner ret_tidhash->rth_tidhash_sz = old_hashsz; 3796eb30ec3SRoger A. Faulkner ret_tidhash->rth_next = p->p_ret_tidhash; 3806eb30ec3SRoger A. Faulkner p->p_ret_tidhash = ret_tidhash; 3816eb30ec3SRoger A. Faulkner 3827c478bd9Sstevel@tonic-gate /* 3836eb30ec3SRoger A. Faulkner * Now establish the new tid hash table. 3846eb30ec3SRoger A. Faulkner * As soon as we assign p->p_tidhash, 3856eb30ec3SRoger A. Faulkner * code in lwp_unpark() can start using it. 3867c478bd9Sstevel@tonic-gate */ 3876eb30ec3SRoger A. Faulkner membar_producer(); 3886eb30ec3SRoger A. Faulkner p->p_tidhash = new_hash; 3896eb30ec3SRoger A. Faulkner 3906eb30ec3SRoger A. Faulkner /* 3916eb30ec3SRoger A. Faulkner * It is necessary that p_tidhash reach global 3926eb30ec3SRoger A. Faulkner * visibility before p_tidhash_sz. Otherwise, 3936eb30ec3SRoger A. Faulkner * code in lwp_hash_lookup_and_lock() could 3946eb30ec3SRoger A. 
Faulkner * index into the old p_tidhash using the new 3956eb30ec3SRoger A. Faulkner * p_tidhash_sz and thereby access invalid data. 3966eb30ec3SRoger A. Faulkner */ 3976eb30ec3SRoger A. Faulkner membar_producer(); 3986eb30ec3SRoger A. Faulkner p->p_tidhash_sz = new_hashsz; 3996eb30ec3SRoger A. Faulkner 4006eb30ec3SRoger A. Faulkner /* 4016eb30ec3SRoger A. Faulkner * Release the locks; allow lwp_unpark() to carry on. 4026eb30ec3SRoger A. Faulkner */ 4036eb30ec3SRoger A. Faulkner for (i = 0; i < old_hashsz; i++) { 4046eb30ec3SRoger A. Faulkner mutex_exit(&old_hash[i].th_lock); 4056eb30ec3SRoger A. Faulkner mutex_exit(&new_hash[i].th_lock); 4066eb30ec3SRoger A. Faulkner } 4076eb30ec3SRoger A. Faulkner 4086eb30ec3SRoger A. Faulkner /* 4096eb30ec3SRoger A. Faulkner * Avoid freeing these objects below. 4106eb30ec3SRoger A. Faulkner */ 4116eb30ec3SRoger A. Faulkner ret_tidhash = NULL; 4126eb30ec3SRoger A. Faulkner old_hash = NULL; 4136eb30ec3SRoger A. Faulkner old_hashsz = 0; 4147c478bd9Sstevel@tonic-gate } 4157c478bd9Sstevel@tonic-gate } 4167c478bd9Sstevel@tonic-gate 4177c478bd9Sstevel@tonic-gate /* 4187c478bd9Sstevel@tonic-gate * Block the process against /proc while we manipulate p->p_tlist, 4197c478bd9Sstevel@tonic-gate * unless lwp_create() was called by /proc for the PCAGENT operation. 4207c478bd9Sstevel@tonic-gate * We want to do this early enough so that we don't drop p->p_lock 4217c478bd9Sstevel@tonic-gate * until the thread is put on the p->p_tlist. 4227c478bd9Sstevel@tonic-gate */ 4237c478bd9Sstevel@tonic-gate if (p == curproc) { 4247c478bd9Sstevel@tonic-gate prbarrier(p); 4257c478bd9Sstevel@tonic-gate /* 4267c478bd9Sstevel@tonic-gate * If the current lwp has been requested to stop, do so now. 4277c478bd9Sstevel@tonic-gate * Otherwise we have a race condition between /proc attempting 4287c478bd9Sstevel@tonic-gate * to stop the process and this thread creating a new lwp 4297c478bd9Sstevel@tonic-gate * that was not seen when the /proc PCSTOP request was issued. 
4307c478bd9Sstevel@tonic-gate * We rely on stop() to call prbarrier(p) before returning. 4317c478bd9Sstevel@tonic-gate */ 4327c478bd9Sstevel@tonic-gate while ((curthread->t_proc_flag & TP_PRSTOP) && 433936e3a33SGangadhar Mylapuram !ttolwp(curthread)->lwp_nostop) { 434936e3a33SGangadhar Mylapuram /* 435936e3a33SGangadhar Mylapuram * We called pool_barrier_enter() before calling 436936e3a33SGangadhar Mylapuram * here to lwp_create(). We have to call 437936e3a33SGangadhar Mylapuram * pool_barrier_exit() before stopping. 438936e3a33SGangadhar Mylapuram */ 439936e3a33SGangadhar Mylapuram pool_barrier_exit(); 440936e3a33SGangadhar Mylapuram prbarrier(p); 4417c478bd9Sstevel@tonic-gate stop(PR_REQUESTED, 0); 442936e3a33SGangadhar Mylapuram /* 443936e3a33SGangadhar Mylapuram * And we have to repeat the call to 444936e3a33SGangadhar Mylapuram * pool_barrier_enter after stopping. 445936e3a33SGangadhar Mylapuram */ 446936e3a33SGangadhar Mylapuram pool_barrier_enter(); 447936e3a33SGangadhar Mylapuram prbarrier(p); 448936e3a33SGangadhar Mylapuram } 4497c478bd9Sstevel@tonic-gate 4507c478bd9Sstevel@tonic-gate /* 4517c478bd9Sstevel@tonic-gate * If process is exiting, there could be a race between 4527c478bd9Sstevel@tonic-gate * the agent lwp creation and the new lwp currently being 4537c478bd9Sstevel@tonic-gate * created. So to prevent this race lwp creation is failed 4547c478bd9Sstevel@tonic-gate * if the process is exiting. 4557c478bd9Sstevel@tonic-gate */ 4567c478bd9Sstevel@tonic-gate if (p->p_flag & (SEXITLWPS|SKILLED)) { 4577c478bd9Sstevel@tonic-gate err = 1; 4587c478bd9Sstevel@tonic-gate goto error; 4597c478bd9Sstevel@tonic-gate } 4607c478bd9Sstevel@tonic-gate 4617c478bd9Sstevel@tonic-gate /* 4627c478bd9Sstevel@tonic-gate * Since we might have dropped p->p_lock, the 4637c478bd9Sstevel@tonic-gate * lwp directory free list might have changed. 
4647c478bd9Sstevel@tonic-gate */ 4657c478bd9Sstevel@tonic-gate if (p->p_lwpfree == NULL) 4667c478bd9Sstevel@tonic-gate goto grow; 4677c478bd9Sstevel@tonic-gate } 4687c478bd9Sstevel@tonic-gate 4697c478bd9Sstevel@tonic-gate kpreempt_disable(); /* can't grab cpu_lock here */ 4707c478bd9Sstevel@tonic-gate 4717c478bd9Sstevel@tonic-gate /* 472*35a5a358SJonathan Adams * Inherit processor and processor set bindings from curthread. 473*35a5a358SJonathan Adams * 474*35a5a358SJonathan Adams * For kernel LWPs, we do not inherit processor set bindings at 475*35a5a358SJonathan Adams * process creation time (i.e. when p != curproc). After the 476*35a5a358SJonathan Adams * kernel process is created, any subsequent LWPs must be created 477*35a5a358SJonathan Adams * by threads in the kernel process, at which point we *will* 478*35a5a358SJonathan Adams * inherit processor set bindings. 4797c478bd9Sstevel@tonic-gate */ 480*35a5a358SJonathan Adams if (CLASS_KERNEL(cid) && p != curproc) { 4817c478bd9Sstevel@tonic-gate t->t_bind_cpu = binding = PBIND_NONE; 4827c478bd9Sstevel@tonic-gate t->t_cpupart = oldpart = &cp_default; 4837c478bd9Sstevel@tonic-gate t->t_bind_pset = PS_NONE; 4840b70c467Sakolb t->t_bindflag = (uchar_t)default_binding_mode; 4857c478bd9Sstevel@tonic-gate } else { 4867c478bd9Sstevel@tonic-gate binding = curthread->t_bind_cpu; 4877c478bd9Sstevel@tonic-gate t->t_bind_cpu = binding; 4887c478bd9Sstevel@tonic-gate oldpart = t->t_cpupart; 4897c478bd9Sstevel@tonic-gate t->t_cpupart = curthread->t_cpupart; 4907c478bd9Sstevel@tonic-gate t->t_bind_pset = curthread->t_bind_pset; 4910b70c467Sakolb t->t_bindflag = curthread->t_bindflag | 4920b70c467Sakolb (uchar_t)default_binding_mode; 4937c478bd9Sstevel@tonic-gate } 4947c478bd9Sstevel@tonic-gate 4957c478bd9Sstevel@tonic-gate /* 4967c478bd9Sstevel@tonic-gate * thread_create() initializes this thread's home lgroup to the root. 
4977c478bd9Sstevel@tonic-gate * Choose a more suitable lgroup, since this thread is associated 4987c478bd9Sstevel@tonic-gate * with an lwp. 4997c478bd9Sstevel@tonic-gate */ 5007c478bd9Sstevel@tonic-gate ASSERT(oldpart != NULL); 5017c478bd9Sstevel@tonic-gate if (binding != PBIND_NONE && t->t_affinitycnt == 0) { 5027c478bd9Sstevel@tonic-gate t->t_bound_cpu = cpu[binding]; 5037c478bd9Sstevel@tonic-gate if (t->t_lpl != t->t_bound_cpu->cpu_lpl) 5047c478bd9Sstevel@tonic-gate lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1); 5057c478bd9Sstevel@tonic-gate } else { 5067c478bd9Sstevel@tonic-gate lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1); 5077c478bd9Sstevel@tonic-gate } 5087c478bd9Sstevel@tonic-gate 5097c478bd9Sstevel@tonic-gate kpreempt_enable(); 5107c478bd9Sstevel@tonic-gate 5117c478bd9Sstevel@tonic-gate /* 5127c478bd9Sstevel@tonic-gate * make sure lpl points to our own partition 5137c478bd9Sstevel@tonic-gate */ 5147c478bd9Sstevel@tonic-gate ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads); 5157c478bd9Sstevel@tonic-gate ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads + 5167c478bd9Sstevel@tonic-gate t->t_cpupart->cp_nlgrploads); 5177c478bd9Sstevel@tonic-gate 5187c478bd9Sstevel@tonic-gate /* 5197c478bd9Sstevel@tonic-gate * If we're creating a new process, then inherit the project from our 5207c478bd9Sstevel@tonic-gate * parent. If we're only creating an additional lwp then use the 5217c478bd9Sstevel@tonic-gate * project pointer of the target process. 
5227c478bd9Sstevel@tonic-gate */ 5237c478bd9Sstevel@tonic-gate if (p->p_task == NULL) 5247c478bd9Sstevel@tonic-gate newkpj = ttoproj(curthread); 5257c478bd9Sstevel@tonic-gate else 5267c478bd9Sstevel@tonic-gate newkpj = p->p_task->tk_proj; 5277c478bd9Sstevel@tonic-gate 5287c478bd9Sstevel@tonic-gate /* 5297c478bd9Sstevel@tonic-gate * It is safe to point the thread to the new project without holding it 5307c478bd9Sstevel@tonic-gate * since we're holding the target process' p_lock here and therefore 5317c478bd9Sstevel@tonic-gate * we're guaranteed that it will not move to another project. 5327c478bd9Sstevel@tonic-gate */ 5337c478bd9Sstevel@tonic-gate oldkpj = ttoproj(t); 5347c478bd9Sstevel@tonic-gate if (newkpj != oldkpj) { 5357c478bd9Sstevel@tonic-gate t->t_proj = newkpj; 5367c478bd9Sstevel@tonic-gate (void) project_hold(newkpj); 5377c478bd9Sstevel@tonic-gate project_rele(oldkpj); 5387c478bd9Sstevel@tonic-gate } 5397c478bd9Sstevel@tonic-gate 5407c478bd9Sstevel@tonic-gate if (cid != NOCLASS) { 5417c478bd9Sstevel@tonic-gate /* 5427c478bd9Sstevel@tonic-gate * If the lwp is being created in the current process 5437c478bd9Sstevel@tonic-gate * and matches the current thread's scheduling class, 5447c478bd9Sstevel@tonic-gate * we should propagate the current thread's scheduling 5457c478bd9Sstevel@tonic-gate * parameters by calling CL_FORK. Otherwise just use 5467c478bd9Sstevel@tonic-gate * the defaults by calling CL_ENTERCLASS. 5477c478bd9Sstevel@tonic-gate */ 5487c478bd9Sstevel@tonic-gate if (p != curproc || curthread->t_cid != cid) { 5497c478bd9Sstevel@tonic-gate err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp); 5507c478bd9Sstevel@tonic-gate t->t_pri = pri; /* CL_ENTERCLASS may have changed it */ 551d4204c85Sraf /* 552d4204c85Sraf * We don't call schedctl_set_cidpri(t) here 553d4204c85Sraf * because the schedctl data is not yet set 554d4204c85Sraf * up for the newly-created lwp. 
555d4204c85Sraf */ 5567c478bd9Sstevel@tonic-gate } else { 5577c478bd9Sstevel@tonic-gate t->t_clfuncs = &(sclass[cid].cl_funcs->thread); 5587c478bd9Sstevel@tonic-gate err = CL_FORK(curthread, t, bufp); 5597c478bd9Sstevel@tonic-gate t->t_cid = cid; 5607c478bd9Sstevel@tonic-gate } 5617c478bd9Sstevel@tonic-gate if (err) 5627c478bd9Sstevel@tonic-gate goto error; 5637c478bd9Sstevel@tonic-gate else 5647c478bd9Sstevel@tonic-gate bufp = NULL; 5657c478bd9Sstevel@tonic-gate } 5667c478bd9Sstevel@tonic-gate 5677c478bd9Sstevel@tonic-gate /* 5687c478bd9Sstevel@tonic-gate * If we were given an lwpid then use it, else allocate one. 5697c478bd9Sstevel@tonic-gate */ 5707c478bd9Sstevel@tonic-gate if (lwpid != 0) 5717c478bd9Sstevel@tonic-gate t->t_tid = lwpid; 5727c478bd9Sstevel@tonic-gate else { 5737c478bd9Sstevel@tonic-gate /* 5747c478bd9Sstevel@tonic-gate * lwp/thread id 0 is never valid; reserved for special checks. 5757c478bd9Sstevel@tonic-gate * lwp/thread id 1 is reserved for the main thread. 5767c478bd9Sstevel@tonic-gate * Start again at 2 when INT_MAX has been reached 5777c478bd9Sstevel@tonic-gate * (id_t is a signed 32-bit integer). 5787c478bd9Sstevel@tonic-gate */ 5797c478bd9Sstevel@tonic-gate id_t prev_id = p->p_lwpid; /* last allocated tid */ 5807c478bd9Sstevel@tonic-gate 5817c478bd9Sstevel@tonic-gate do { /* avoid lwpid duplication */ 5827c478bd9Sstevel@tonic-gate if (p->p_lwpid == INT_MAX) { 5837c478bd9Sstevel@tonic-gate p->p_flag |= SLWPWRAP; 5847c478bd9Sstevel@tonic-gate p->p_lwpid = 1; 5857c478bd9Sstevel@tonic-gate } 5867c478bd9Sstevel@tonic-gate if ((t->t_tid = ++p->p_lwpid) == prev_id) { 5877c478bd9Sstevel@tonic-gate /* 5887c478bd9Sstevel@tonic-gate * All lwpids are allocated; fail the request. 
5897c478bd9Sstevel@tonic-gate */ 5907c478bd9Sstevel@tonic-gate err = 1; 5917c478bd9Sstevel@tonic-gate goto error; 5927c478bd9Sstevel@tonic-gate } 5937c478bd9Sstevel@tonic-gate /* 5947c478bd9Sstevel@tonic-gate * We only need to worry about colliding with an id 5957c478bd9Sstevel@tonic-gate * that's already in use if this process has 5967c478bd9Sstevel@tonic-gate * cycled through all available lwp ids. 5977c478bd9Sstevel@tonic-gate */ 5987c478bd9Sstevel@tonic-gate if ((p->p_flag & SLWPWRAP) == 0) 5997c478bd9Sstevel@tonic-gate break; 6007c478bd9Sstevel@tonic-gate } while (lwp_hash_lookup(p, t->t_tid) != NULL); 6017c478bd9Sstevel@tonic-gate } 6029acbbeafSnn 6039acbbeafSnn /* 6049acbbeafSnn * If this is a branded process, let the brand do any necessary lwp 6059acbbeafSnn * initialization. 6069acbbeafSnn */ 6079acbbeafSnn if (PROC_IS_BRANDED(p)) { 6089acbbeafSnn if (BROP(p)->b_initlwp(lwp)) { 6099acbbeafSnn err = 1; 6109acbbeafSnn goto error; 6119acbbeafSnn } 6129acbbeafSnn branded = 1; 6139acbbeafSnn } 6149acbbeafSnn 6152cb27123Saguzovsk if (t->t_tid == 1) { 6162cb27123Saguzovsk kpreempt_disable(); 6172cb27123Saguzovsk ASSERT(t->t_lpl != NULL); 6182cb27123Saguzovsk p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid; 6192cb27123Saguzovsk kpreempt_enable(); 6202cb27123Saguzovsk if (p->p_tr_lgrpid != LGRP_NONE && 6212cb27123Saguzovsk p->p_tr_lgrpid != p->p_t1_lgrpid) { 6222cb27123Saguzovsk lgrp_update_trthr_migrations(1); 6232cb27123Saguzovsk } 6242cb27123Saguzovsk } 6252cb27123Saguzovsk 6267c478bd9Sstevel@tonic-gate p->p_lwpcnt++; 6277c478bd9Sstevel@tonic-gate t->t_waitfor = -1; 6287c478bd9Sstevel@tonic-gate 6297c478bd9Sstevel@tonic-gate /* 6307c478bd9Sstevel@tonic-gate * Turn microstate accounting on for thread if on for process. 
6317c478bd9Sstevel@tonic-gate */ 6327c478bd9Sstevel@tonic-gate if (p->p_flag & SMSACCT) 6337c478bd9Sstevel@tonic-gate t->t_proc_flag |= TP_MSACCT; 6347c478bd9Sstevel@tonic-gate 6357c478bd9Sstevel@tonic-gate /* 6367c478bd9Sstevel@tonic-gate * If the process has watchpoints, mark the new thread as such. 6377c478bd9Sstevel@tonic-gate */ 6387c478bd9Sstevel@tonic-gate if (pr_watch_active(p)) 6397c478bd9Sstevel@tonic-gate watch_enable(t); 6407c478bd9Sstevel@tonic-gate 6417c478bd9Sstevel@tonic-gate /* 6427c478bd9Sstevel@tonic-gate * The lwp is being created in the stopped state. 6437c478bd9Sstevel@tonic-gate * We set all the necessary flags to indicate that fact here. 6447c478bd9Sstevel@tonic-gate * We omit the TS_CREATE flag from t_schedflag so that the lwp 6457c478bd9Sstevel@tonic-gate * cannot be set running until the caller is finished with it, 6467c478bd9Sstevel@tonic-gate * even if lwp_continue() is called on it after we drop p->p_lock. 6477c478bd9Sstevel@tonic-gate * When the caller is finished with the newly-created lwp, 6487c478bd9Sstevel@tonic-gate * the caller must call lwp_create_done() to allow the lwp 6497c478bd9Sstevel@tonic-gate * to be set running. If the TP_HOLDLWP is left set, the 6507c478bd9Sstevel@tonic-gate * lwp will suspend itself after reaching system call exit. 6517c478bd9Sstevel@tonic-gate */ 6527c478bd9Sstevel@tonic-gate init_mstate(t, LMS_STOPPED); 6537c478bd9Sstevel@tonic-gate t->t_proc_flag |= TP_HOLDLWP; 6547c478bd9Sstevel@tonic-gate t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE)); 6557c478bd9Sstevel@tonic-gate t->t_whystop = PR_SUSPENDED; 6567c478bd9Sstevel@tonic-gate t->t_whatstop = SUSPEND_NORMAL; 6577c478bd9Sstevel@tonic-gate t->t_sig_check = 1; /* ensure that TP_HOLDLWP is honored */ 6587c478bd9Sstevel@tonic-gate 6597c478bd9Sstevel@tonic-gate /* 6607c478bd9Sstevel@tonic-gate * Set system call processing flags in case tracing or profiling 6617c478bd9Sstevel@tonic-gate * is set. 
The first system call will evaluate these and turn 6627c478bd9Sstevel@tonic-gate * them off if they aren't needed. 6637c478bd9Sstevel@tonic-gate */ 6647c478bd9Sstevel@tonic-gate t->t_pre_sys = 1; 6657c478bd9Sstevel@tonic-gate t->t_post_sys = 1; 6667c478bd9Sstevel@tonic-gate 6677c478bd9Sstevel@tonic-gate /* 6687c478bd9Sstevel@tonic-gate * Insert the new thread into the list of all threads. 6697c478bd9Sstevel@tonic-gate */ 6707c478bd9Sstevel@tonic-gate if ((tx = p->p_tlist) == NULL) { 6717c478bd9Sstevel@tonic-gate t->t_back = t; 6727c478bd9Sstevel@tonic-gate t->t_forw = t; 6737c478bd9Sstevel@tonic-gate p->p_tlist = t; 6747c478bd9Sstevel@tonic-gate } else { 6757c478bd9Sstevel@tonic-gate t->t_forw = tx; 6767c478bd9Sstevel@tonic-gate t->t_back = tx->t_back; 6777c478bd9Sstevel@tonic-gate tx->t_back->t_forw = t; 6787c478bd9Sstevel@tonic-gate tx->t_back = t; 6797c478bd9Sstevel@tonic-gate } 6807c478bd9Sstevel@tonic-gate 6817c478bd9Sstevel@tonic-gate /* 6827c478bd9Sstevel@tonic-gate * Insert the new lwp into an lwp directory slot position 6837c478bd9Sstevel@tonic-gate * and into the lwpid hash table. 6847c478bd9Sstevel@tonic-gate */ 6857c478bd9Sstevel@tonic-gate lep->le_thread = t; 6867c478bd9Sstevel@tonic-gate lep->le_lwpid = t->t_tid; 6877c478bd9Sstevel@tonic-gate lep->le_start = t->t_start; 6886eb30ec3SRoger A. Faulkner lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1); 6897c478bd9Sstevel@tonic-gate 6907c478bd9Sstevel@tonic-gate if (state == TS_RUN) { 6917c478bd9Sstevel@tonic-gate /* 6927c478bd9Sstevel@tonic-gate * We set the new lwp running immediately. 
6937c478bd9Sstevel@tonic-gate */ 6947c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_HOLDLWP; 6957c478bd9Sstevel@tonic-gate lwp_create_done(t); 6967c478bd9Sstevel@tonic-gate } 6977c478bd9Sstevel@tonic-gate 6987c478bd9Sstevel@tonic-gate error: 6997c478bd9Sstevel@tonic-gate if (err) { 700*35a5a358SJonathan Adams if (CLASS_KERNEL(cid)) { 701*35a5a358SJonathan Adams /* 702*35a5a358SJonathan Adams * This should only happen if a system process runs 703*35a5a358SJonathan Adams * out of lwpids, which shouldn't occur. 704*35a5a358SJonathan Adams */ 705*35a5a358SJonathan Adams panic("Failed to create a system LWP"); 706*35a5a358SJonathan Adams } 7077c478bd9Sstevel@tonic-gate /* 7087c478bd9Sstevel@tonic-gate * We have failed to create an lwp, so decrement the number 7097c478bd9Sstevel@tonic-gate * of lwps in the task and let the lgroup load averages know 7107c478bd9Sstevel@tonic-gate * that this thread isn't going to show up. 7117c478bd9Sstevel@tonic-gate */ 7127c478bd9Sstevel@tonic-gate kpreempt_disable(); 7137c478bd9Sstevel@tonic-gate lgrp_move_thread(t, NULL, 1); 7147c478bd9Sstevel@tonic-gate kpreempt_enable(); 7157c478bd9Sstevel@tonic-gate 7167c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 7177c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 7187c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps--; 7197c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps--; 7207c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps--; 7217c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 7227c478bd9Sstevel@tonic-gate if (cid != NOCLASS && bufp != NULL) 7237c478bd9Sstevel@tonic-gate CL_FREE(cid, bufp); 7247c478bd9Sstevel@tonic-gate 7259acbbeafSnn if (branded) 7269acbbeafSnn BROP(p)->b_freelwp(lwp); 7279acbbeafSnn 7287c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 7297c478bd9Sstevel@tonic-gate t->t_state = TS_FREE; 7307c478bd9Sstevel@tonic-gate thread_rele(t); 7317c478bd9Sstevel@tonic-gate 7327c478bd9Sstevel@tonic-gate /* 
7337c478bd9Sstevel@tonic-gate * We need to remove t from the list of all threads 7347c478bd9Sstevel@tonic-gate * because thread_exit()/lwp_exit() isn't called on t. 7357c478bd9Sstevel@tonic-gate */ 7367c478bd9Sstevel@tonic-gate mutex_enter(&pidlock); 7377c478bd9Sstevel@tonic-gate ASSERT(t != t->t_next); /* t0 never exits */ 7387c478bd9Sstevel@tonic-gate t->t_next->t_prev = t->t_prev; 7397c478bd9Sstevel@tonic-gate t->t_prev->t_next = t->t_next; 7407c478bd9Sstevel@tonic-gate mutex_exit(&pidlock); 7417c478bd9Sstevel@tonic-gate 7427c478bd9Sstevel@tonic-gate thread_free(t); 7437c478bd9Sstevel@tonic-gate kmem_free(lep, sizeof (*lep)); 7447c478bd9Sstevel@tonic-gate lwp = NULL; 7457c478bd9Sstevel@tonic-gate } else { 7467c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 7477c478bd9Sstevel@tonic-gate } 7487c478bd9Sstevel@tonic-gate 7496eb30ec3SRoger A. Faulkner if (old_dir != NULL) 7507c478bd9Sstevel@tonic-gate kmem_free(old_dir, old_dirsz * sizeof (*old_dir)); 7516eb30ec3SRoger A. Faulkner if (old_hash != NULL) 7527c478bd9Sstevel@tonic-gate kmem_free(old_hash, old_hashsz * sizeof (*old_hash)); 7536eb30ec3SRoger A. Faulkner if (ret_tidhash != NULL) 7546eb30ec3SRoger A. Faulkner kmem_free(ret_tidhash, sizeof (ret_tidhash_t)); 7557c478bd9Sstevel@tonic-gate 7567c478bd9Sstevel@tonic-gate DTRACE_PROC1(lwp__create, kthread_t *, t); 7577c478bd9Sstevel@tonic-gate return (lwp); 7587c478bd9Sstevel@tonic-gate } 7597c478bd9Sstevel@tonic-gate 7607c478bd9Sstevel@tonic-gate /* 7617c478bd9Sstevel@tonic-gate * lwp_create_done() is called by the caller of lwp_create() to set the 7627c478bd9Sstevel@tonic-gate * newly-created lwp running after the caller has finished manipulating it. 
 */
/*
 * NOTE(review): the caller must hold the target process's p_lock
 * (asserted below), and t must be a newly created lwp that is still
 * stopped and has never had TS_CREATE set (also asserted below).
 */
void
lwp_create_done(kthread_t *t)
{
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
	 * (The absence of the TS_CREATE flag prevents the lwp from running
	 * until we are finished with it, even if lwp_continue() is called on
	 * it by some other lwp in the process or elsewhere in the kernel.)
	 */
	thread_lock(t);
	ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
	/*
	 * If TS_CSTART is set, lwp_continue(t) has been called and
	 * has already incremented p_lwprcnt; avoid doing this twice.
	 */
	if (!(t->t_schedflag & TS_CSTART))
		p->p_lwprcnt++;
	t->t_schedflag |= (TS_CSTART | TS_CREATE);
	setrun_locked(t);
	thread_unlock(t);
}

/*
 * Copy an LWP's active templates, and clear the latest contracts.
7927c478bd9Sstevel@tonic-gate */ 7937c478bd9Sstevel@tonic-gate void 7947c478bd9Sstevel@tonic-gate lwp_ctmpl_copy(klwp_t *dst, klwp_t *src) 7957c478bd9Sstevel@tonic-gate { 7967c478bd9Sstevel@tonic-gate int i; 7977c478bd9Sstevel@tonic-gate 7987c478bd9Sstevel@tonic-gate for (i = 0; i < ct_ntypes; i++) { 7997c478bd9Sstevel@tonic-gate dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]); 8007c478bd9Sstevel@tonic-gate dst->lwp_ct_latest[i] = NULL; 8017c478bd9Sstevel@tonic-gate } 8027c478bd9Sstevel@tonic-gate } 8037c478bd9Sstevel@tonic-gate 8047c478bd9Sstevel@tonic-gate /* 8057c478bd9Sstevel@tonic-gate * Clear an LWP's contract template state. 8067c478bd9Sstevel@tonic-gate */ 8077c478bd9Sstevel@tonic-gate void 8087c478bd9Sstevel@tonic-gate lwp_ctmpl_clear(klwp_t *lwp) 8097c478bd9Sstevel@tonic-gate { 8107c478bd9Sstevel@tonic-gate ct_template_t *tmpl; 8117c478bd9Sstevel@tonic-gate int i; 8127c478bd9Sstevel@tonic-gate 8137c478bd9Sstevel@tonic-gate for (i = 0; i < ct_ntypes; i++) { 8147c478bd9Sstevel@tonic-gate if ((tmpl = lwp->lwp_ct_active[i]) != NULL) { 8157c478bd9Sstevel@tonic-gate ctmpl_free(tmpl); 8167c478bd9Sstevel@tonic-gate lwp->lwp_ct_active[i] = NULL; 8177c478bd9Sstevel@tonic-gate } 8187c478bd9Sstevel@tonic-gate 8197c478bd9Sstevel@tonic-gate if (lwp->lwp_ct_latest[i] != NULL) { 8207c478bd9Sstevel@tonic-gate contract_rele(lwp->lwp_ct_latest[i]); 8217c478bd9Sstevel@tonic-gate lwp->lwp_ct_latest[i] = NULL; 8227c478bd9Sstevel@tonic-gate } 8237c478bd9Sstevel@tonic-gate } 8247c478bd9Sstevel@tonic-gate } 8257c478bd9Sstevel@tonic-gate 8267c478bd9Sstevel@tonic-gate /* 8277c478bd9Sstevel@tonic-gate * Individual lwp exit. 8287c478bd9Sstevel@tonic-gate * If this is the last lwp, exit the whole process. 
 */
void
lwp_exit(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Drop p_lock across the per-thread teardown below; it is
	 * reacquired before we touch p_lwpcnt/p_tlist.
	 */
	mutex_exit(&p->p_lock);

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully abandoned..
	 */
	trash_user_windows();
#endif

	tsd_exit();		/* free thread specific data */

	kcpc_passivate();	/* Clean up performance counter state */

	pollcleanup();

	if (t->t_door)
		door_slam();

	if (t->t_schedctl != NULL)
		schedctl_lwp_cleanup(t);

	if (t->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Perform any brand specific exit processing, then release any
	 * brand data associated with the lwp
	 */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_lwpexit(lwp);

	mutex_enter(&p->p_lock);
	lwp_cleanup();

	/*
	 * When this process is dumping core, its lwps are held here
	 * until the core dump is finished. Then exitlwps() is called
	 * again to release these lwps so that they can finish exiting.
	 */
	if (p->p_flag & SCOREDUMP)
		stop(PR_SUSPENDED, SUSPEND_NORMAL);

	/*
	 * Block the process against /proc now that we have really acquired
	 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
	 */
	prbarrier(p);

	/*
	 * Call proc_exit() if this is the last non-daemon lwp in the process.
	 */
	if (!(t->t_proc_flag & TP_DAEMON) &&
	    p->p_lwpcnt == p->p_lwpdaemon + 1) {
		mutex_exit(&p->p_lock);
		if (proc_exit(CLD_EXITED, 0) == 0) {
			/* Restarting init. */
			return;
		}

		/*
		 * proc_exit() returns a non-zero value when some other
		 * lwp got there first.  We just have to continue in
		 * lwp_exit().
		 */
		mutex_enter(&p->p_lock);
		ASSERT(curproc->p_flag & SEXITLWPS);
		prbarrier(p);
	}

	DTRACE_PROC(lwp__exit);

	/*
	 * If the lwp is a detached lwp or if the process is exiting,
	 * remove (lwp_hash_out()) the lwp from the lwp directory.
	 * Otherwise null out the lwp's le_thread pointer in the lwp
	 * directory so that other threads will see it as a zombie lwp.
	 */
	prlwpexit(t);		/* notify /proc */
	if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
		lwp_hash_out(p, t->t_tid);
	else {
		ASSERT(!(t->t_proc_flag & TP_DAEMON));
		p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
		p->p_zombcnt++;
		cv_broadcast(&p->p_lwpexit);
	}
	if (t->t_proc_flag & TP_DAEMON) {
		p->p_lwpdaemon--;
		t->t_proc_flag &= ~TP_DAEMON;
	}
	t->t_proc_flag &= ~TP_TWAIT;

	/*
	 * Maintain accurate lwp count for task.max-lwps resource control.
	 */
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	p->p_task->tk_nlwps--;
	p->p_task->tk_proj->kpj_nlwps--;
	p->p_zone->zone_nlwps--;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	CL_EXIT(t);		/* tell the scheduler that t is exiting */
	ASSERT(p->p_lwpcnt != 0);
	p->p_lwpcnt--;

	/*
	 * If all remaining non-daemon lwps are waiting in lwp_wait(),
	 * wake them up so someone can return EDEADLK.
	 * (See the block comment preceeding lwp_wait().)
	 */
	if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
		cv_broadcast(&p->p_lwpexit);

	t->t_proc_flag |= TP_LWPEXIT;
	term_mstate(t);

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	/* Unlink t from the process's circular p_tlist. */
	t->t_forw->t_back = t->t_back;
	t->t_back->t_forw = t->t_forw;
	if (t == p->p_tlist)
		p->p_tlist = t->t_forw;

	/*
	 * Clean up the signal state.
	 */
	if (t->t_sigqueue != NULL)
		sigdelq(p, t, 0);
	if (lwp->lwp_curinfo != NULL) {
		siginfofree(lwp->lwp_curinfo);
		lwp->lwp_curinfo = NULL;
	}

	thread_rele(t);

	/*
	 * Terminated lwps are associated with process zero and are put onto
	 * death-row by resume().  Avoid preemption after resetting t->t_procp.
	 */
	t->t_preempt++;

	if (t->t_ctx != NULL)
		exitctx(t);
	if (p->p_pctx != NULL)
		exitpctx(p);

	t->t_procp = &p0;

	/*
	 * Notify the HAT about the change of address space
	 */
	hat_thread_exit(t);
	/*
	 * When this is the last running lwp in this process and some lwp is
	 * waiting for this condition to become true, or this thread was being
	 * suspended, then the waiting lwp is awakened.
	 *
	 * Also, if the process is exiting, we may have a thread waiting in
	 * exitlwps() that needs to be notified.
	 */
	if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
	    (p->p_flag & SEXITLWPS))
		cv_broadcast(&p->p_holdlwps);

	/*
	 * Need to drop p_lock so we can reacquire pidlock.
	 */
	mutex_exit(&p->p_lock);
	mutex_enter(&pidlock);

	ASSERT(t != t->t_next);		/* t0 never exits */
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	lwp_pcb_exit();

	t->t_state = TS_ZOMB;
	swtch_from_zombie();
	/* never returns */
}


/*
 * Cleanup function for an exiting lwp.
 * Called both from lwp_exit() and from proc_exit().
 * p->p_lock is repeatedly released and grabbed in this function.
 */
void
lwp_cleanup(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/* untimeout any lwp-bound realtime timers */
	if (p->p_itimer != NULL)
		timer_lwpexit();

	/*
	 * If this is the /proc agent lwp that is exiting, readjust p_lwpid
	 * so it appears that the agent never existed, and clear p_agenttp.
	 */
	if (t == p->p_agenttp) {
		ASSERT(t->t_tid == p->p_lwpid);
		p->p_lwpid--;
		p->p_agenttp = NULL;
	}

	/*
	 * Do lgroup bookkeeping to account for thread exiting.
	 */
	kpreempt_disable();
	lgrp_move_thread(t, NULL, 1);
	if (t->t_tid == 1) {
		/* Thread 1 is gone; forget its home lgroup. */
		p->p_t1_lgrpid = LGRP_NONE;
	}
	kpreempt_enable();

	lwp_ctmpl_clear(ttolwp(t));
}

/*
 * Suspend the target lwp: set TP_HOLDLWP so it stops in holdlwp(), and
 * wait (interruptibly) for it to reach the SUSPENDED state.
 * Returns 0 on success, EINTR if the wait was interrupted by a signal,
 * or ESRCH if the target lwp exited while we were waiting.
 * The caller must hold p->p_lock (asserted below).
 */
int
lwp_suspend(kthread_t *t)
{
	int tid;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp().
	 * If an lwp is stopping itself, there is no need to wait.
	 */
top:
	t->t_proc_flag |= TP_HOLDLWP;
	if (t == curthread) {
		t->t_sig_check = 1;
	} else {
		/*
		 * Make sure the lwp stops promptly.
		 */
		thread_lock(t);
		t->t_sig_check = 1;
		/*
		 * XXX Should use virtual stop like /proc does instead of
		 * XXX waking the thread to get it to stop.
		 */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) {
			poke_cpu(t->t_cpu->cpu_id);
		}

		tid = t->t_tid;		/* remember thread ID */
		/*
		 * Wait for lwp to stop
		 */
		while (!SUSPENDED(t)) {
			/*
			 * Drop the thread lock before waiting and reacquire it
			 * afterwards, so the thread can change its t_state
			 * field.
			 */
			thread_unlock(t);

			/*
			 * Check if aborted by exitlwps().
			 */
			if (p->p_flag & SEXITLWPS)
				lwp_exit();

			/*
			 * Cooperate with jobcontrol signals and /proc stopping
			 * by calling cv_wait_sig() to wait for the target
			 * lwp to stop.  Just using cv_wait() can lead to
			 * deadlock because, if some other lwp has stopped
			 * by either of these mechanisms, then p_lwprcnt will
			 * never become zero if we do a cv_wait().
			 */
			if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock))
				return (EINTR);

			/*
			 * Check to see if thread died while we were
			 * waiting for it to suspend.
			 */
			if (idtot(p, tid) == NULL)
				return (ESRCH);

			thread_lock(t);
			/*
			 * If the TP_HOLDLWP flag went away, lwp_continue()
			 * or vfork() must have been called while we were
			 * waiting, so start over again.
			 */
			if ((t->t_proc_flag & TP_HOLDLWP) == 0) {
				thread_unlock(t);
				goto top;
			}
		}
		thread_unlock(t);
	}
	return (0);
}

/*
 * continue a lwp that's been stopped by lwp_suspend().
 */
void
lwp_continue(kthread_t *t)
{
	proc_t *p = ttoproc(t);
	/*
	 * Sample TP_HOLDLWP before we clear it below, so we know whether
	 * anyone could be waiting in lwp_suspend() for this lwp to stop.
	 */
	int was_suspended = t->t_proc_flag & TP_HOLDLWP;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t->t_proc_flag &= ~TP_HOLDLWP;
	thread_lock(t);
	/*
	 * Only restart the lwp if it is actually suspended and no
	 * process-wide hold (fork/watchpoint) is in effect.
	 */
	if (SUSPENDED(t) &&
	    !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) {
		p->p_lwprcnt++;
		t->t_schedflag |= TS_CSTART;
		setrun_locked(t);
	}
	thread_unlock(t);
	/*
	 * Wakeup anyone waiting for this thread to be suspended
	 */
	if (was_suspended)
		cv_broadcast(&p->p_holdlwps);
}

/*
 * ********************************
 *  Miscellaneous lwp routines	*
 * ********************************
 */
/*
 * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK.
 * This will cause the process's lwps to stop at a hold point.  A hold
 * point is where a kernel thread has a flat stack.  This is at the
 * return from a system call and at the return from a user level trap.
 *
 * When a process is undergoing a fork1() or vfork(), its p_flag is set to
 * SHOLDFORK1.  This will cause the process's lwps to stop at a modified
 * hold point.  The lwps in the process are not being cloned, so they
 * are held at the usual hold points and also within issig_forreal().
 * This has the side-effect that their system calls do not return
 * showing EINTR.
 *
 * An lwp can also be held.  This is identified by the TP_HOLDLWP flag on
 * the thread.  The TP_HOLDLWP flag is set in lwp_suspend(), where the active
 * lwp is waiting for the target lwp to be stopped.
 */
/*
 * Stop the calling lwp at a hold point if the process is being held
 * (see the block comment above), or exit it if the process or this lwp
 * has been told to exit.
 */
void
holdlwp(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;

	mutex_enter(&p->p_lock);
	/*
	 * Don't terminate immediately if the process is dumping core.
	 * Once the process has dumped core, all lwps are terminated.
	 */
	if (!(p->p_flag & SCOREDUMP)) {
		if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP))
			lwp_exit();
	}
	if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) {
		mutex_exit(&p->p_lock);
		return;
	}
	/*
	 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps
	 * when p->p_lwprcnt becomes zero.
	 */
	stop(PR_SUSPENDED, SUSPEND_NORMAL);
	/* Re-check after stopping: the process may have begun exiting. */
	if (p->p_flag & SEXITLWPS)
		lwp_exit();
	mutex_exit(&p->p_lock);
}

/*
 * Have all lwps within the process hold at a point where they are
 * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1).
 */
int
holdlwps(int holdflag)
{
	proc_t *p = curproc;

	ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1);
	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
again:
	while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		/*
		 * If another lwp is doing a forkall() or proc_exit(), bail out.
12457c478bd9Sstevel@tonic-gate */ 12467c478bd9Sstevel@tonic-gate if (p->p_flag & (SEXITLWPS | SHOLDFORK)) { 12477c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 12487c478bd9Sstevel@tonic-gate return (0); 12497c478bd9Sstevel@tonic-gate } 12507c478bd9Sstevel@tonic-gate /* 12517c478bd9Sstevel@tonic-gate * Another lwp is doing a fork1() or is undergoing 12527c478bd9Sstevel@tonic-gate * watchpoint activity. We hold here for it to complete. 12537c478bd9Sstevel@tonic-gate */ 12547c478bd9Sstevel@tonic-gate stop(PR_SUSPENDED, SUSPEND_NORMAL); 12557c478bd9Sstevel@tonic-gate } 12567c478bd9Sstevel@tonic-gate p->p_flag |= holdflag; 12577c478bd9Sstevel@tonic-gate pokelwps(p); 12587c478bd9Sstevel@tonic-gate --p->p_lwprcnt; 12597c478bd9Sstevel@tonic-gate /* 12607c478bd9Sstevel@tonic-gate * Wait for the process to become quiescent (p->p_lwprcnt == 0). 12617c478bd9Sstevel@tonic-gate */ 12627c478bd9Sstevel@tonic-gate while (p->p_lwprcnt > 0) { 12637c478bd9Sstevel@tonic-gate /* 12647c478bd9Sstevel@tonic-gate * Check if aborted by exitlwps(). 12657c478bd9Sstevel@tonic-gate * Also check if SHOLDWATCH is set; it takes precedence. 12667c478bd9Sstevel@tonic-gate */ 12677c478bd9Sstevel@tonic-gate if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) { 12687c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 12697c478bd9Sstevel@tonic-gate p->p_flag &= ~holdflag; 12707c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 12717c478bd9Sstevel@tonic-gate goto again; 12727c478bd9Sstevel@tonic-gate } 12737c478bd9Sstevel@tonic-gate /* 12747c478bd9Sstevel@tonic-gate * Cooperate with jobcontrol signals and /proc stopping. 12757c478bd9Sstevel@tonic-gate * If some other lwp has stopped by either of these 12767c478bd9Sstevel@tonic-gate * mechanisms, then p_lwprcnt will never become zero 12777c478bd9Sstevel@tonic-gate * and the process will appear deadlocked unless we 12787c478bd9Sstevel@tonic-gate * stop here in sympathy with the other lwp before 12797c478bd9Sstevel@tonic-gate * doing the cv_wait() below. 
12807c478bd9Sstevel@tonic-gate * 12817c478bd9Sstevel@tonic-gate * If the other lwp stops after we do the cv_wait(), it 12827c478bd9Sstevel@tonic-gate * will wake us up to loop around and do the sympathy stop. 12837c478bd9Sstevel@tonic-gate * 12847c478bd9Sstevel@tonic-gate * Since stop() drops p->p_lock, we must start from 12857c478bd9Sstevel@tonic-gate * the top again on returning from stop(). 12867c478bd9Sstevel@tonic-gate */ 12877c478bd9Sstevel@tonic-gate if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) { 12887c478bd9Sstevel@tonic-gate int whystop = p->p_stopsig? PR_JOBCONTROL : 12897c478bd9Sstevel@tonic-gate PR_REQUESTED; 12907c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 12917c478bd9Sstevel@tonic-gate p->p_flag &= ~holdflag; 12927c478bd9Sstevel@tonic-gate stop(whystop, p->p_stopsig); 12937c478bd9Sstevel@tonic-gate goto again; 12947c478bd9Sstevel@tonic-gate } 12957c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 12967c478bd9Sstevel@tonic-gate } 12977c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 12987c478bd9Sstevel@tonic-gate p->p_flag &= ~holdflag; 12997c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 13007c478bd9Sstevel@tonic-gate return (1); 13017c478bd9Sstevel@tonic-gate } 13027c478bd9Sstevel@tonic-gate 13037c478bd9Sstevel@tonic-gate /* 13047c478bd9Sstevel@tonic-gate * See comments for holdwatch(), below. 13057c478bd9Sstevel@tonic-gate */ 13067c478bd9Sstevel@tonic-gate static int 13077c478bd9Sstevel@tonic-gate holdcheck(int clearflags) 13087c478bd9Sstevel@tonic-gate { 13097c478bd9Sstevel@tonic-gate proc_t *p = curproc; 13107c478bd9Sstevel@tonic-gate 13117c478bd9Sstevel@tonic-gate /* 13127c478bd9Sstevel@tonic-gate * If we are trying to exit, that takes precedence over anything else. 
13137c478bd9Sstevel@tonic-gate */ 13147c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 13157c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13167c478bd9Sstevel@tonic-gate p->p_flag &= ~clearflags; 13177c478bd9Sstevel@tonic-gate lwp_exit(); 13187c478bd9Sstevel@tonic-gate } 13197c478bd9Sstevel@tonic-gate 13207c478bd9Sstevel@tonic-gate /* 13217c478bd9Sstevel@tonic-gate * If another thread is calling fork1(), stop the current thread so the 13227c478bd9Sstevel@tonic-gate * other can complete. 13237c478bd9Sstevel@tonic-gate */ 13247c478bd9Sstevel@tonic-gate if (p->p_flag & SHOLDFORK1) { 13257c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13267c478bd9Sstevel@tonic-gate stop(PR_SUSPENDED, SUSPEND_NORMAL); 13277c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 13287c478bd9Sstevel@tonic-gate p->p_flag &= ~clearflags; 13297c478bd9Sstevel@tonic-gate lwp_exit(); 13307c478bd9Sstevel@tonic-gate } 13317c478bd9Sstevel@tonic-gate return (-1); 13327c478bd9Sstevel@tonic-gate } 13337c478bd9Sstevel@tonic-gate 13347c478bd9Sstevel@tonic-gate /* 13357c478bd9Sstevel@tonic-gate * If another thread is calling fork(), then indicate we are doing 13367c478bd9Sstevel@tonic-gate * watchpoint activity. This will cause holdlwps() above to stop the 13377c478bd9Sstevel@tonic-gate * forking thread, at which point we can continue with watchpoint 13387c478bd9Sstevel@tonic-gate * activity. 
13397c478bd9Sstevel@tonic-gate */ 13407c478bd9Sstevel@tonic-gate if (p->p_flag & SHOLDFORK) { 13417c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13427c478bd9Sstevel@tonic-gate while (p->p_flag & SHOLDFORK) { 13437c478bd9Sstevel@tonic-gate p->p_flag |= SHOLDWATCH; 13447c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 13457c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 13467c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDWATCH; 13477c478bd9Sstevel@tonic-gate } 13487c478bd9Sstevel@tonic-gate return (-1); 13497c478bd9Sstevel@tonic-gate } 13507c478bd9Sstevel@tonic-gate 13517c478bd9Sstevel@tonic-gate return (0); 13527c478bd9Sstevel@tonic-gate } 13537c478bd9Sstevel@tonic-gate 13547c478bd9Sstevel@tonic-gate /* 13557c478bd9Sstevel@tonic-gate * Stop all lwps within the process, holding themselves in the kernel while the 13567c478bd9Sstevel@tonic-gate * active lwp undergoes watchpoint activity. This is more complicated than 13577c478bd9Sstevel@tonic-gate * expected because stop() relies on calling holdwatch() in order to copyin data 13587c478bd9Sstevel@tonic-gate * from the user's address space. A double barrier is used to prevent an 13597c478bd9Sstevel@tonic-gate * infinite loop. 13607c478bd9Sstevel@tonic-gate * 13617c478bd9Sstevel@tonic-gate * o The first thread into holdwatch() is the 'master' thread and does 13627c478bd9Sstevel@tonic-gate * the following: 13637c478bd9Sstevel@tonic-gate * 13647c478bd9Sstevel@tonic-gate * - Sets SHOLDWATCH on the current process 13657c478bd9Sstevel@tonic-gate * - Sets TP_WATCHSTOP on the current thread 13667c478bd9Sstevel@tonic-gate * - Waits for all threads to be either stopped or have 13677c478bd9Sstevel@tonic-gate * TP_WATCHSTOP set. 
13687c478bd9Sstevel@tonic-gate * - Sets the SWATCHOK flag on the process 13697c478bd9Sstevel@tonic-gate * - Unsets TP_WATCHSTOP 13707c478bd9Sstevel@tonic-gate * - Waits for the other threads to completely stop 13717c478bd9Sstevel@tonic-gate * - Unsets SWATCHOK 13727c478bd9Sstevel@tonic-gate * 13737c478bd9Sstevel@tonic-gate * o If SHOLDWATCH is already set when we enter this function, then another 13747c478bd9Sstevel@tonic-gate * thread is already trying to stop this thread. This 'slave' thread 13757c478bd9Sstevel@tonic-gate * does the following: 13767c478bd9Sstevel@tonic-gate * 13777c478bd9Sstevel@tonic-gate * - Sets TP_WATCHSTOP on the current thread 13787c478bd9Sstevel@tonic-gate * - Waits for SWATCHOK flag to be set 13797c478bd9Sstevel@tonic-gate * - Calls stop() 13807c478bd9Sstevel@tonic-gate * 13817c478bd9Sstevel@tonic-gate * o If SWATCHOK is set on the process, then this function immediately 13827c478bd9Sstevel@tonic-gate * returns, as we must have been called via stop(). 13837c478bd9Sstevel@tonic-gate * 13847c478bd9Sstevel@tonic-gate * In addition, there are other flags that take precedence over SHOLDWATCH: 13857c478bd9Sstevel@tonic-gate * 13867c478bd9Sstevel@tonic-gate * o If SEXITLWPS is set, exit immediately. 13877c478bd9Sstevel@tonic-gate * 13887c478bd9Sstevel@tonic-gate * o If SHOLDFORK1 is set, wait for fork1() to complete. 13897c478bd9Sstevel@tonic-gate * 13907c478bd9Sstevel@tonic-gate * o If SHOLDFORK is set, then watchpoint activity takes precedence In this 13917c478bd9Sstevel@tonic-gate * case, set SHOLDWATCH, signalling the forking thread to stop first. 13927c478bd9Sstevel@tonic-gate * 13937c478bd9Sstevel@tonic-gate * o If the process is being stopped via /proc (TP_PRSTOP is set), then we 13947c478bd9Sstevel@tonic-gate * stop the current thread. 13957c478bd9Sstevel@tonic-gate * 13967c478bd9Sstevel@tonic-gate * Returns 0 if all threads have been quiesced. 
Returns non-zero if not all 13977c478bd9Sstevel@tonic-gate * threads were stopped, or the list of watched pages has changed. 13987c478bd9Sstevel@tonic-gate */ 13997c478bd9Sstevel@tonic-gate int 14007c478bd9Sstevel@tonic-gate holdwatch(void) 14017c478bd9Sstevel@tonic-gate { 14027c478bd9Sstevel@tonic-gate proc_t *p = curproc; 14037c478bd9Sstevel@tonic-gate kthread_t *t = curthread; 14047c478bd9Sstevel@tonic-gate int ret = 0; 14057c478bd9Sstevel@tonic-gate 14067c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 14077c478bd9Sstevel@tonic-gate 14087c478bd9Sstevel@tonic-gate p->p_lwprcnt--; 14097c478bd9Sstevel@tonic-gate 14107c478bd9Sstevel@tonic-gate /* 14117c478bd9Sstevel@tonic-gate * Check for bail-out conditions as outlined above. 14127c478bd9Sstevel@tonic-gate */ 14137c478bd9Sstevel@tonic-gate if (holdcheck(0) != 0) { 14147c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 14157c478bd9Sstevel@tonic-gate return (-1); 14167c478bd9Sstevel@tonic-gate } 14177c478bd9Sstevel@tonic-gate 14187c478bd9Sstevel@tonic-gate if (!(p->p_flag & SHOLDWATCH)) { 14197c478bd9Sstevel@tonic-gate /* 14207c478bd9Sstevel@tonic-gate * We are the master watchpoint thread. Set SHOLDWATCH and poke 14217c478bd9Sstevel@tonic-gate * the other threads. 14227c478bd9Sstevel@tonic-gate */ 14237c478bd9Sstevel@tonic-gate p->p_flag |= SHOLDWATCH; 14247c478bd9Sstevel@tonic-gate pokelwps(p); 14257c478bd9Sstevel@tonic-gate 14267c478bd9Sstevel@tonic-gate /* 14277c478bd9Sstevel@tonic-gate * Wait for all threads to be stopped or have TP_WATCHSTOP set. 
14287c478bd9Sstevel@tonic-gate */ 14297c478bd9Sstevel@tonic-gate while (pr_allstopped(p, 1) > 0) { 14307c478bd9Sstevel@tonic-gate if (holdcheck(SHOLDWATCH) != 0) { 14317c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDWATCH; 14327c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 14337c478bd9Sstevel@tonic-gate return (-1); 14347c478bd9Sstevel@tonic-gate } 14357c478bd9Sstevel@tonic-gate 14367c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 14377c478bd9Sstevel@tonic-gate } 14387c478bd9Sstevel@tonic-gate 14397c478bd9Sstevel@tonic-gate /* 14407c478bd9Sstevel@tonic-gate * All threads are now stopped or in the process of stopping. 14417c478bd9Sstevel@tonic-gate * Set SWATCHOK and let them stop completely. 14427c478bd9Sstevel@tonic-gate */ 14437c478bd9Sstevel@tonic-gate p->p_flag |= SWATCHOK; 14447c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_WATCHSTOP; 14457c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 14467c478bd9Sstevel@tonic-gate 14477c478bd9Sstevel@tonic-gate while (pr_allstopped(p, 0) > 0) { 14487c478bd9Sstevel@tonic-gate /* 14497c478bd9Sstevel@tonic-gate * At first glance, it may appear that we don't need a 14507c478bd9Sstevel@tonic-gate * call to holdcheck() here. But if the process gets a 14517c478bd9Sstevel@tonic-gate * SIGKILL signal, one of our stopped threads may have 14527c478bd9Sstevel@tonic-gate * been awakened and is waiting in exitlwps(), which 14537c478bd9Sstevel@tonic-gate * takes precedence over watchpoints. 
14547c478bd9Sstevel@tonic-gate */ 14557c478bd9Sstevel@tonic-gate if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) { 14567c478bd9Sstevel@tonic-gate p->p_flag &= ~(SHOLDWATCH | SWATCHOK); 14577c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 14587c478bd9Sstevel@tonic-gate return (-1); 14597c478bd9Sstevel@tonic-gate } 14607c478bd9Sstevel@tonic-gate 14617c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 14627c478bd9Sstevel@tonic-gate } 14637c478bd9Sstevel@tonic-gate 14647c478bd9Sstevel@tonic-gate /* 14657c478bd9Sstevel@tonic-gate * All threads are now completely stopped. 14667c478bd9Sstevel@tonic-gate */ 14677c478bd9Sstevel@tonic-gate p->p_flag &= ~SWATCHOK; 14687c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDWATCH; 14697c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 14707c478bd9Sstevel@tonic-gate 14717c478bd9Sstevel@tonic-gate } else if (!(p->p_flag & SWATCHOK)) { 14727c478bd9Sstevel@tonic-gate 14737c478bd9Sstevel@tonic-gate /* 14747c478bd9Sstevel@tonic-gate * SHOLDWATCH is set, so another thread is trying to do 14757c478bd9Sstevel@tonic-gate * watchpoint activity. Indicate this thread is stopping, and 14767c478bd9Sstevel@tonic-gate * wait for the OK from the master thread. 
14777c478bd9Sstevel@tonic-gate */ 14787c478bd9Sstevel@tonic-gate t->t_proc_flag |= TP_WATCHSTOP; 14797c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 14807c478bd9Sstevel@tonic-gate 14817c478bd9Sstevel@tonic-gate while (!(p->p_flag & SWATCHOK)) { 14827c478bd9Sstevel@tonic-gate if (holdcheck(0) != 0) { 14837c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_WATCHSTOP; 14847c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 14857c478bd9Sstevel@tonic-gate return (-1); 14867c478bd9Sstevel@tonic-gate } 14877c478bd9Sstevel@tonic-gate 14887c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 14897c478bd9Sstevel@tonic-gate } 14907c478bd9Sstevel@tonic-gate 14917c478bd9Sstevel@tonic-gate /* 14927c478bd9Sstevel@tonic-gate * Once the master thread has given the OK, this thread can 14937c478bd9Sstevel@tonic-gate * actually call stop(). 14947c478bd9Sstevel@tonic-gate */ 14957c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_WATCHSTOP; 14967c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 14977c478bd9Sstevel@tonic-gate 14987c478bd9Sstevel@tonic-gate stop(PR_SUSPENDED, SUSPEND_NORMAL); 14997c478bd9Sstevel@tonic-gate 15007c478bd9Sstevel@tonic-gate /* 15017c478bd9Sstevel@tonic-gate * It's not OK to do watchpoint activity, notify caller to 15027c478bd9Sstevel@tonic-gate * retry. 15037c478bd9Sstevel@tonic-gate */ 15047c478bd9Sstevel@tonic-gate ret = -1; 15057c478bd9Sstevel@tonic-gate 15067c478bd9Sstevel@tonic-gate } else { 15077c478bd9Sstevel@tonic-gate 15087c478bd9Sstevel@tonic-gate /* 15097c478bd9Sstevel@tonic-gate * The only way we can hit the case where SHOLDWATCH is set and 15107c478bd9Sstevel@tonic-gate * SWATCHOK is set is if we are triggering this from within a 15117c478bd9Sstevel@tonic-gate * stop() call. Assert that this is the case. 
15127c478bd9Sstevel@tonic-gate */ 15137c478bd9Sstevel@tonic-gate 15147c478bd9Sstevel@tonic-gate ASSERT(t->t_proc_flag & TP_STOPPING); 15157c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 15167c478bd9Sstevel@tonic-gate } 15177c478bd9Sstevel@tonic-gate 15187c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 15197c478bd9Sstevel@tonic-gate 15207c478bd9Sstevel@tonic-gate return (ret); 15217c478bd9Sstevel@tonic-gate } 15227c478bd9Sstevel@tonic-gate 15237c478bd9Sstevel@tonic-gate /* 15247c478bd9Sstevel@tonic-gate * force all interruptible lwps to trap into the kernel. 15257c478bd9Sstevel@tonic-gate */ 15267c478bd9Sstevel@tonic-gate void 15277c478bd9Sstevel@tonic-gate pokelwps(proc_t *p) 15287c478bd9Sstevel@tonic-gate { 15297c478bd9Sstevel@tonic-gate kthread_t *t; 15307c478bd9Sstevel@tonic-gate 15317c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 15327c478bd9Sstevel@tonic-gate 15337c478bd9Sstevel@tonic-gate t = p->p_tlist; 15347c478bd9Sstevel@tonic-gate do { 15357c478bd9Sstevel@tonic-gate if (t == curthread) 15367c478bd9Sstevel@tonic-gate continue; 15377c478bd9Sstevel@tonic-gate thread_lock(t); 15387c478bd9Sstevel@tonic-gate aston(t); /* make thread trap or do post_syscall */ 1539c97ad5cdSakolb if (ISWAKEABLE(t) || ISWAITING(t)) { 1540c97ad5cdSakolb setrun_locked(t); 15417c478bd9Sstevel@tonic-gate } else if (t->t_state == TS_STOPPED) { 15427c478bd9Sstevel@tonic-gate /* 15437c478bd9Sstevel@tonic-gate * Ensure that proc_exit() is not blocked by lwps 15447c478bd9Sstevel@tonic-gate * that were stopped via jobcontrol or /proc. 
15457c478bd9Sstevel@tonic-gate */ 15467c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 15477c478bd9Sstevel@tonic-gate p->p_stopsig = 0; 15487c478bd9Sstevel@tonic-gate t->t_schedflag |= (TS_XSTART | TS_PSTART); 15497c478bd9Sstevel@tonic-gate setrun_locked(t); 15507c478bd9Sstevel@tonic-gate } 15517c478bd9Sstevel@tonic-gate /* 15527c478bd9Sstevel@tonic-gate * If we are holding lwps for a forkall(), 15537c478bd9Sstevel@tonic-gate * force lwps that have been suspended via 15547c478bd9Sstevel@tonic-gate * lwp_suspend() and are suspended inside 15557c478bd9Sstevel@tonic-gate * of a system call to proceed to their 15567c478bd9Sstevel@tonic-gate * holdlwp() points where they are clonable. 15577c478bd9Sstevel@tonic-gate */ 15587c478bd9Sstevel@tonic-gate if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) { 15597c478bd9Sstevel@tonic-gate if ((t->t_schedflag & TS_CSTART) == 0) { 15607c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 15617c478bd9Sstevel@tonic-gate t->t_schedflag |= TS_CSTART; 15627c478bd9Sstevel@tonic-gate setrun_locked(t); 15637c478bd9Sstevel@tonic-gate } 15647c478bd9Sstevel@tonic-gate } 15657c478bd9Sstevel@tonic-gate } else if (t->t_state == TS_ONPROC) { 15667c478bd9Sstevel@tonic-gate if (t->t_cpu != CPU) 15677c478bd9Sstevel@tonic-gate poke_cpu(t->t_cpu->cpu_id); 15687c478bd9Sstevel@tonic-gate } 15697c478bd9Sstevel@tonic-gate thread_unlock(t); 15707c478bd9Sstevel@tonic-gate } while ((t = t->t_forw) != p->p_tlist); 15717c478bd9Sstevel@tonic-gate } 15727c478bd9Sstevel@tonic-gate 15737c478bd9Sstevel@tonic-gate /* 15747c478bd9Sstevel@tonic-gate * undo the effects of holdlwps() or holdwatch(). 
15757c478bd9Sstevel@tonic-gate */ 15767c478bd9Sstevel@tonic-gate void 15777c478bd9Sstevel@tonic-gate continuelwps(proc_t *p) 15787c478bd9Sstevel@tonic-gate { 15797c478bd9Sstevel@tonic-gate kthread_t *t; 15807c478bd9Sstevel@tonic-gate 15817c478bd9Sstevel@tonic-gate /* 15827c478bd9Sstevel@tonic-gate * If this flag is set, then the original holdwatch() didn't actually 15837c478bd9Sstevel@tonic-gate * stop the process. See comments for holdwatch(). 15847c478bd9Sstevel@tonic-gate */ 15857c478bd9Sstevel@tonic-gate if (p->p_flag & SWATCHOK) { 15867c478bd9Sstevel@tonic-gate ASSERT(curthread->t_proc_flag & TP_STOPPING); 15877c478bd9Sstevel@tonic-gate return; 15887c478bd9Sstevel@tonic-gate } 15897c478bd9Sstevel@tonic-gate 15907c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 15917c478bd9Sstevel@tonic-gate ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0); 15927c478bd9Sstevel@tonic-gate 15937c478bd9Sstevel@tonic-gate t = p->p_tlist; 15947c478bd9Sstevel@tonic-gate do { 15957c478bd9Sstevel@tonic-gate thread_lock(t); /* SUSPENDED looks at t_schedflag */ 15967c478bd9Sstevel@tonic-gate if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) { 15977c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 15987c478bd9Sstevel@tonic-gate t->t_schedflag |= TS_CSTART; 15997c478bd9Sstevel@tonic-gate setrun_locked(t); 16007c478bd9Sstevel@tonic-gate } 16017c478bd9Sstevel@tonic-gate thread_unlock(t); 16027c478bd9Sstevel@tonic-gate } while ((t = t->t_forw) != p->p_tlist); 16037c478bd9Sstevel@tonic-gate } 16047c478bd9Sstevel@tonic-gate 16057c478bd9Sstevel@tonic-gate /* 16067c478bd9Sstevel@tonic-gate * Force all other LWPs in the current process other than the caller to exit, 16077c478bd9Sstevel@tonic-gate * and then cv_wait() on p_holdlwps for them to exit. 
The exitlwps() function 16087c478bd9Sstevel@tonic-gate * is typically used in these situations: 16097c478bd9Sstevel@tonic-gate * 16107c478bd9Sstevel@tonic-gate * (a) prior to an exec() system call 16117c478bd9Sstevel@tonic-gate * (b) prior to dumping a core file 16127c478bd9Sstevel@tonic-gate * (c) prior to a uadmin() shutdown 16137c478bd9Sstevel@tonic-gate * 16147c478bd9Sstevel@tonic-gate * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed. 16157c478bd9Sstevel@tonic-gate * Multiple threads in the process can call this function at one time by 16167c478bd9Sstevel@tonic-gate * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used 16177c478bd9Sstevel@tonic-gate * to declare one particular thread the winner who gets to kill the others. 16187c478bd9Sstevel@tonic-gate * If a thread wins the exitlwps() dance, zero is returned; otherwise an 16197c478bd9Sstevel@tonic-gate * appropriate errno value is returned to caller for its system call to return. 16207c478bd9Sstevel@tonic-gate */ 16217c478bd9Sstevel@tonic-gate int 16227c478bd9Sstevel@tonic-gate exitlwps(int coredump) 16237c478bd9Sstevel@tonic-gate { 16247c478bd9Sstevel@tonic-gate proc_t *p = curproc; 16257c478bd9Sstevel@tonic-gate int heldcnt; 16267c478bd9Sstevel@tonic-gate 16277c478bd9Sstevel@tonic-gate if (curthread->t_door) 16287c478bd9Sstevel@tonic-gate door_slam(); 16297c478bd9Sstevel@tonic-gate if (p->p_door_list) 16307c478bd9Sstevel@tonic-gate door_revoke_all(); 16317c478bd9Sstevel@tonic-gate if (curthread->t_schedctl != NULL) 16327c478bd9Sstevel@tonic-gate schedctl_lwp_cleanup(curthread); 16337c478bd9Sstevel@tonic-gate 16347c478bd9Sstevel@tonic-gate /* 16357c478bd9Sstevel@tonic-gate * Ensure that before starting to wait for other lwps to exit, 16367c478bd9Sstevel@tonic-gate * cleanup all upimutexes held by curthread. 
Otherwise, some other 16377c478bd9Sstevel@tonic-gate * lwp could be waiting (uninterruptibly) for a upimutex held by 16387c478bd9Sstevel@tonic-gate * curthread, and the call to pokelwps() below would deadlock. 16397c478bd9Sstevel@tonic-gate * Even if a blocked upimutex_lock is made interruptible, 16407c478bd9Sstevel@tonic-gate * curthread's upimutexes need to be unlocked: do it here. 16417c478bd9Sstevel@tonic-gate */ 16427c478bd9Sstevel@tonic-gate if (curthread->t_upimutex != NULL) 16437c478bd9Sstevel@tonic-gate upimutex_cleanup(); 16447c478bd9Sstevel@tonic-gate 16457c478bd9Sstevel@tonic-gate /* 16467c478bd9Sstevel@tonic-gate * Grab p_lock in order to check and set SEXITLWPS to declare a winner. 16477c478bd9Sstevel@tonic-gate * We must also block any further /proc access from this point forward. 16487c478bd9Sstevel@tonic-gate */ 16497c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 16507c478bd9Sstevel@tonic-gate prbarrier(p); 16517c478bd9Sstevel@tonic-gate 16527c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 16537c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 16547c478bd9Sstevel@tonic-gate aston(curthread); /* force a trip through post_syscall */ 16557c478bd9Sstevel@tonic-gate return (set_errno(EINTR)); 16567c478bd9Sstevel@tonic-gate } 16577c478bd9Sstevel@tonic-gate 16587c478bd9Sstevel@tonic-gate p->p_flag |= SEXITLWPS; 16597c478bd9Sstevel@tonic-gate if (coredump) /* tell other lwps to stop, not exit */ 16607c478bd9Sstevel@tonic-gate p->p_flag |= SCOREDUMP; 16617c478bd9Sstevel@tonic-gate 16627c478bd9Sstevel@tonic-gate /* 16637c478bd9Sstevel@tonic-gate * Give precedence to exitlwps() if a holdlwps() is 16647c478bd9Sstevel@tonic-gate * in progress. The lwp doing the holdlwps() operation 16657c478bd9Sstevel@tonic-gate * is aborted when it is awakened. 
16667c478bd9Sstevel@tonic-gate */ 16677c478bd9Sstevel@tonic-gate while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) { 16687c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 16697c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 167097eda132Sraf prbarrier(p); 16717c478bd9Sstevel@tonic-gate } 16727c478bd9Sstevel@tonic-gate p->p_flag |= SHOLDFORK; 16737c478bd9Sstevel@tonic-gate pokelwps(p); 16747c478bd9Sstevel@tonic-gate 16757c478bd9Sstevel@tonic-gate /* 16767c478bd9Sstevel@tonic-gate * Wait for process to become quiescent. 16777c478bd9Sstevel@tonic-gate */ 16787c478bd9Sstevel@tonic-gate --p->p_lwprcnt; 167997eda132Sraf while (p->p_lwprcnt > 0) { 16807c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 168197eda132Sraf prbarrier(p); 168297eda132Sraf } 16837c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 16847c478bd9Sstevel@tonic-gate ASSERT(p->p_lwprcnt == 1); 16857c478bd9Sstevel@tonic-gate 16867c478bd9Sstevel@tonic-gate /* 16877c478bd9Sstevel@tonic-gate * The SCOREDUMP flag puts the process into a quiescent 16887c478bd9Sstevel@tonic-gate * state. The process's lwps remain attached to this 16897c478bd9Sstevel@tonic-gate * process until exitlwps() is called again without the 16907c478bd9Sstevel@tonic-gate * 'coredump' flag set, then the lwps are terminated 16917c478bd9Sstevel@tonic-gate * and the process can exit. 16927c478bd9Sstevel@tonic-gate */ 16937c478bd9Sstevel@tonic-gate if (coredump) { 16947c478bd9Sstevel@tonic-gate p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS); 16957c478bd9Sstevel@tonic-gate goto out; 16967c478bd9Sstevel@tonic-gate } 16977c478bd9Sstevel@tonic-gate 16987c478bd9Sstevel@tonic-gate /* 16997c478bd9Sstevel@tonic-gate * Determine if there are any lwps left dangling in 17007c478bd9Sstevel@tonic-gate * the stopped state. This happens when exitlwps() 17017c478bd9Sstevel@tonic-gate * aborts a holdlwps() operation. 
17027c478bd9Sstevel@tonic-gate */ 17037c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDFORK; 17047c478bd9Sstevel@tonic-gate if ((heldcnt = p->p_lwpcnt) > 1) { 17057c478bd9Sstevel@tonic-gate kthread_t *t; 17067c478bd9Sstevel@tonic-gate for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) { 17077c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_TWAIT; 17087c478bd9Sstevel@tonic-gate lwp_continue(t); 17097c478bd9Sstevel@tonic-gate } 17107c478bd9Sstevel@tonic-gate } 17117c478bd9Sstevel@tonic-gate 17127c478bd9Sstevel@tonic-gate /* 17137c478bd9Sstevel@tonic-gate * Wait for all other lwps to exit. 17147c478bd9Sstevel@tonic-gate */ 17157c478bd9Sstevel@tonic-gate --p->p_lwprcnt; 171697eda132Sraf while (p->p_lwpcnt > 1) { 17177c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 171897eda132Sraf prbarrier(p); 171997eda132Sraf } 17207c478bd9Sstevel@tonic-gate ++p->p_lwprcnt; 17217c478bd9Sstevel@tonic-gate ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1); 17227c478bd9Sstevel@tonic-gate 17237c478bd9Sstevel@tonic-gate p->p_flag &= ~SEXITLWPS; 17247c478bd9Sstevel@tonic-gate curthread->t_proc_flag &= ~TP_TWAIT; 17257c478bd9Sstevel@tonic-gate 17267c478bd9Sstevel@tonic-gate out: 17277c478bd9Sstevel@tonic-gate if (!coredump && p->p_zombcnt) { /* cleanup the zombie lwps */ 17287c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 17297c478bd9Sstevel@tonic-gate lwpent_t *lep; 17307c478bd9Sstevel@tonic-gate int i; 17317c478bd9Sstevel@tonic-gate 17327c478bd9Sstevel@tonic-gate for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { 17337c478bd9Sstevel@tonic-gate lep = ldp->ld_entry; 17347c478bd9Sstevel@tonic-gate if (lep != NULL && lep->le_thread != curthread) { 17357c478bd9Sstevel@tonic-gate ASSERT(lep->le_thread == NULL); 17367c478bd9Sstevel@tonic-gate p->p_zombcnt--; 17377c478bd9Sstevel@tonic-gate lwp_hash_out(p, lep->le_lwpid); 17387c478bd9Sstevel@tonic-gate } 17397c478bd9Sstevel@tonic-gate } 17407c478bd9Sstevel@tonic-gate ASSERT(p->p_zombcnt == 0); 17417c478bd9Sstevel@tonic-gate } 
	/*
	 * If some other LWP in the process wanted us to suspend ourself,
	 * then we will not do it.  The other LWP is now terminated and
	 * no one will ever continue us again if we suspend ourself.
	 */
	curthread->t_proc_flag &= ~TP_HOLDLWP;
	p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * Duplicate the lwp 'lwp' (belonging to the current process) into the
 * child process 'cp' with lwp-id 'lwpid'.  Used on the fork() path.
 *
 * Returns a pointer to the newly created child lwp, or NULL if
 * lwp_create() fails.  The child lwp/thread is created TS_STOPPED;
 * the caller is responsible for eventually setting it running.
 *
 * Must be called from the process owning 'lwp' (ASSERT(p == curproc));
 * 'lwp' must be the calling lwp itself, or a suspended lwp that is not
 * asleep in a syscall.
 */
klwp_t *
forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid)
{
	klwp_t *clwp;
	void *tregs, *tfpu;
	kthread_t *t = lwptot(lwp);
	kthread_t *ct;
	proc_t *p = lwptoproc(lwp);
	int cid;
	void *bufp;
	void *brand_data;
	int val;

	ASSERT(p == curproc);
	ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0));

#if defined(__sparc)
	/* flush register windows so the stack copy below sees them */
	if (t == curthread)
		(void) flush_user_windows_to_stack(NULL);
#endif

	if (t == curthread)
		/* copy args out of registers first */
		(void) save_syscall_args();

	clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt,
	    NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid);
	if (clwp == NULL)
		return (NULL);

	/*
	 * most of the parent's lwp can be copied to its duplicate,
	 * except for the fields that are unique to each lwp, like
	 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap.
	 *
	 * Save the child-unique pointers now: the structure assignment
	 * below (*clwp = *lwp) would otherwise overwrite them with the
	 * parent's values.
	 */
	ct = clwp->lwp_thread;
	tregs = clwp->lwp_regs;
	tfpu = clwp->lwp_fpu;
	brand_data = clwp->lwp_brand;

	/*
	 * Copy parent lwp to child lwp.  Hold child's p_lock to prevent
	 * mstate_aggr_state() from reading stale mstate entries copied
	 * from lwp to clwp.
	 */
	mutex_enter(&cp->p_lock);
	*clwp = *lwp;

	/* clear microstate and resource usage data in new lwp */
	init_mstate(ct, LMS_STOPPED);
	bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru));
	mutex_exit(&cp->p_lock);

	/* fix up child's lwp: restore the child-unique fields saved above */

	clwp->lwp_pcb.pcb_flags = 0;
#if defined(__sparc)
	clwp->lwp_pcb.pcb_step = STEP_NONE;
#endif
	clwp->lwp_cursig = 0;
	clwp->lwp_extsig = 0;
	clwp->lwp_curinfo = (struct sigqueue *)0;
	clwp->lwp_thread = ct;
	ct->t_sysnum = t->t_sysnum;
	clwp->lwp_regs = tregs;
	clwp->lwp_fpu = tfpu;
	clwp->lwp_brand = brand_data;
	clwp->lwp_ap = clwp->lwp_arg;
	clwp->lwp_procp = cp;
	bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer));
	clwp->lwp_lastfault = 0;
	clwp->lwp_lastfaddr = 0;

	/* copy parent's struct regs to child. */
	lwp_forkregs(lwp, clwp);

	/*
	 * Fork thread context ops, if any.
	 */
	if (t->t_ctx)
		forkctx(t, ct);

	/* fix door state in the child */
	if (t->t_door)
		door_fork(t, ct);

	/* copy current contract templates, clear latest contracts */
	lwp_ctmpl_copy(clwp, lwp);

	mutex_enter(&cp->p_lock);
	/* lwp_create() set the TP_HOLDLWP flag */
	if (!(t->t_proc_flag & TP_HOLDLWP))
		ct->t_proc_flag &= ~TP_HOLDLWP;
	if (cp->p_flag & SMSACCT)
		ct->t_proc_flag |= TP_MSACCT;
	mutex_exit(&cp->p_lock);

	/* Allow brand to propagate brand-specific state */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_forklwp(lwp, clwp);

retry:
	cid = t->t_cid;

	/*
	 * Pre-allocate the class-specific data buffer while not holding
	 * p_lock (CL_ALLOC may sleep), then verify under p_lock that the
	 * scheduling class did not change in the meantime.
	 */
	val = CL_ALLOC(&bufp, cid, KM_SLEEP);
	ASSERT(val == 0);

	mutex_enter(&p->p_lock);
	if (cid != t->t_cid) {
		/*
		 * Someone just changed this thread's scheduling class,
		 * so try pre-allocating the buffer again.  Hopefully we
		 * don't hit this often.
		 */
		mutex_exit(&p->p_lock);
		CL_FREE(cid, bufp);
		goto retry;
	}

	ct->t_unpark = t->t_unpark;
	ct->t_clfuncs = t->t_clfuncs;
	CL_FORK(t, ct, bufp);
	ct->t_cid = t->t_cid;	/* after data allocated so prgetpsinfo works */
	mutex_exit(&p->p_lock);

	return (clwp);
}

/*
 * Add a new lwp entry to the lwp directory and to the lwpid hash table.
 *
 * The caller passes the hash table ('tidhash'/'tidhash_sz') explicitly
 * rather than using p->p_tidhash directly; 'do_lock' selects whether the
 * per-bucket th_lock is taken here (presumably zero when the caller has
 * already serialized access to the table -- verify against callers).
 */
void
lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz,
    int do_lock)
{
	tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	kthread_t *t;

	/*
	 * Allocate a directory element from the free list.
	 * Code elsewhere guarantees a free slot.
	 */
	ldp = p->p_lwpfree;
	p->p_lwpfree = ldp->ld_next;
	ASSERT(ldp->ld_entry == NULL);
	ldp->ld_entry = lep;

	if (do_lock)
		mutex_enter(&thp->th_lock);

	/*
	 * Insert it into the lwpid hash table (at the head of the bucket).
	 */
	ldpp = &thp->th_list;
	ldp->ld_next = *ldpp;
	*ldpp = ldp;

	/*
	 * Set the active thread's directory slot entry.
	 */
	if ((t = lep->le_thread) != NULL) {
		ASSERT(lep->le_lwpid == t->t_tid);
		t->t_dslot = (int)(ldp - p->p_lwpdir);
	}

	if (do_lock)
		mutex_exit(&thp->th_lock);
}

/*
 * Remove an lwp from the lwpid hash table and free its directory entry.
 * This is done when a detached lwp exits in lwp_exit() or
 * when a non-detached lwp is waited for in lwp_wait() or
 * when a zombie lwp is detached in lwp_detach().
 *
 * Silently does nothing if 'lwpid' is not found in the hash table.
 */
void
lwp_hash_out(proc_t *p, id_t lwpid)
{
	tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	lwpent_t *lep;

	mutex_enter(&thp->th_lock);
	for (ldpp = &thp->th_list;
	    (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) {
		lep = ldp->ld_entry;
		if (lep->le_lwpid == lwpid) {
			prlwpfree(p, lep);	/* /proc deals with le_trace */
			/* unlink from the bucket, return slot to free list */
			*ldpp = ldp->ld_next;
			ldp->ld_entry = NULL;
			ldp->ld_next = p->p_lwpfree;
			p->p_lwpfree = ldp;
			kmem_free(lep, sizeof (*lep));
			break;
		}
	}
	mutex_exit(&thp->th_lock);
}

/*
 * Lookup an lwp in the lwpid hash table by lwpid.
 * Returns the directory entry, or NULL if not found.
 */
lwpdir_t *
lwp_hash_lookup(proc_t *p, id_t lwpid)
{
	tidhash_t *thp;
	lwpdir_t *ldp;

	/*
	 * The process may be exiting, after p_tidhash has been set to NULL in
	 * proc_exit() but before prfree() has been called.  Return failure in
	 * this case.
	 */
	if (p->p_tidhash == NULL)
		return (NULL);

	thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid)
			return (ldp);
	}

	return (NULL);
}

/*
 * Same as lwp_hash_lookup(), but acquire and return
 * the tid hash table entry lock on success.
 * On success, *mpp points to the held bucket lock, which the
 * caller must release.  Returns NULL (no lock held) on failure.
 */
lwpdir_t *
lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp)
{
	tidhash_t *tidhash;
	uint_t tidhash_sz;
	tidhash_t *thp;
	lwpdir_t *ldp;

top:
	/*
	 * Snapshot the table size before the table pointer; the
	 * membar_consumer() orders the two reads (presumably pairing
	 * with a membar_producer() where the table is resized --
	 * confirm against the resize code).
	 */
	tidhash_sz = p->p_tidhash_sz;
	membar_consumer();
	if ((tidhash = p->p_tidhash) == NULL)
		return (NULL);

	thp = &tidhash[TIDHASH(lwpid, tidhash_sz)];
	mutex_enter(&thp->th_lock);

	/*
	 * Since we are not holding p->p_lock, the tid hash table
	 * may have changed.  If so, start over.  If not, then
	 * it cannot change until after we drop &thp->th_lock;
	 */
	if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) {
		mutex_exit(&thp->th_lock);
		goto top;
	}

	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid) {
			*mpp = &thp->th_lock;
			return (ldp);
		}
	}

	mutex_exit(&thp->th_lock);
	return (NULL);
}

/*
 * Update the indicated LWP usage statistic for the current LWP.
 * Silently does nothing when called from a thread with no lwp
 * (e.g. a kernel-only thread).
 */
void
lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc)
{
	klwp_t *lwp = ttolwp(curthread);

	if (lwp == NULL)
		return;

	switch (lwp_stat_id) {
	case LWP_STAT_INBLK:
		lwp->lwp_ru.inblock += inc;
		break;
	case LWP_STAT_OUBLK:
		lwp->lwp_ru.oublock += inc;
		break;
	case LWP_STAT_MSGRCV:
		lwp->lwp_ru.msgrcv += inc;
		break;
	case LWP_STAT_MSGSND:
		lwp->lwp_ru.msgsnd += inc;
		break;
	default:
		panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id);
	}
}