17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 59acbbeafSnn * Common Development and Distribution License (the "License"). 69acbbeafSnn * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 2197eda132Sraf 227c478bd9Sstevel@tonic-gate /* 236eb30ec3SRoger A. Faulkner * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 247c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
257c478bd9Sstevel@tonic-gate */ 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate #include <sys/param.h> 287c478bd9Sstevel@tonic-gate #include <sys/types.h> 297c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 307c478bd9Sstevel@tonic-gate #include <sys/systm.h> 317c478bd9Sstevel@tonic-gate #include <sys/thread.h> 327c478bd9Sstevel@tonic-gate #include <sys/proc.h> 337c478bd9Sstevel@tonic-gate #include <sys/task.h> 347c478bd9Sstevel@tonic-gate #include <sys/project.h> 357c478bd9Sstevel@tonic-gate #include <sys/signal.h> 367c478bd9Sstevel@tonic-gate #include <sys/errno.h> 377c478bd9Sstevel@tonic-gate #include <sys/vmparam.h> 387c478bd9Sstevel@tonic-gate #include <sys/stack.h> 397c478bd9Sstevel@tonic-gate #include <sys/procfs.h> 407c478bd9Sstevel@tonic-gate #include <sys/prsystm.h> 417c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 427c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 437c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 447c478bd9Sstevel@tonic-gate #include <sys/door.h> 457c478bd9Sstevel@tonic-gate #include <vm/seg_kp.h> 467c478bd9Sstevel@tonic-gate #include <sys/debug.h> 477c478bd9Sstevel@tonic-gate #include <sys/tnf.h> 487c478bd9Sstevel@tonic-gate #include <sys/schedctl.h> 497c478bd9Sstevel@tonic-gate #include <sys/poll.h> 507c478bd9Sstevel@tonic-gate #include <sys/copyops.h> 517c478bd9Sstevel@tonic-gate #include <sys/lwp_upimutex_impl.h> 527c478bd9Sstevel@tonic-gate #include <sys/cpupart.h> 537c478bd9Sstevel@tonic-gate #include <sys/lgrp.h> 547c478bd9Sstevel@tonic-gate #include <sys/rctl.h> 557c478bd9Sstevel@tonic-gate #include <sys/contract_impl.h> 567c478bd9Sstevel@tonic-gate #include <sys/cpc_impl.h> 577c478bd9Sstevel@tonic-gate #include <sys/sdt.h> 587c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 599acbbeafSnn #include <sys/brand.h> 60e0cf54a5SRoger A. Faulkner #include <sys/cyclic.h> 61*936e3a33SGangadhar Mylapuram #include <sys/pool.h> 627c478bd9Sstevel@tonic-gate 636eb30ec3SRoger A. 
Faulkner /* hash function for the lwpid hash table, p->p_tidhash[] */ 646eb30ec3SRoger A. Faulkner #define TIDHASH(tid, hash_sz) ((tid) & ((hash_sz) - 1)) 656eb30ec3SRoger A. Faulkner 667c478bd9Sstevel@tonic-gate void *segkp_lwp; /* cookie for pool of segkp resources */ 67575a7426Spt extern void reapq_move_lq_to_tq(kthread_t *); 68575a7426Spt extern void freectx_ctx(struct ctxop *); 697c478bd9Sstevel@tonic-gate 707c478bd9Sstevel@tonic-gate /* 717c478bd9Sstevel@tonic-gate * Create a thread that appears to be stopped at sys_rtt. 727c478bd9Sstevel@tonic-gate */ 737c478bd9Sstevel@tonic-gate klwp_t * 747c478bd9Sstevel@tonic-gate lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p, 757c478bd9Sstevel@tonic-gate int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid) 767c478bd9Sstevel@tonic-gate { 777c478bd9Sstevel@tonic-gate klwp_t *lwp = NULL; 787c478bd9Sstevel@tonic-gate kthread_t *t; 797c478bd9Sstevel@tonic-gate kthread_t *tx; 807c478bd9Sstevel@tonic-gate cpupart_t *oldpart = NULL; 817c478bd9Sstevel@tonic-gate size_t stksize; 827c478bd9Sstevel@tonic-gate caddr_t lwpdata = NULL; 837c478bd9Sstevel@tonic-gate processorid_t binding; 847c478bd9Sstevel@tonic-gate int err = 0; 857c478bd9Sstevel@tonic-gate kproject_t *oldkpj, *newkpj; 867c478bd9Sstevel@tonic-gate void *bufp = NULL; 877c478bd9Sstevel@tonic-gate klwp_t *curlwp = ttolwp(curthread); 887c478bd9Sstevel@tonic-gate lwpent_t *lep; 897c478bd9Sstevel@tonic-gate lwpdir_t *old_dir = NULL; 907c478bd9Sstevel@tonic-gate uint_t old_dirsz = 0; 916eb30ec3SRoger A. Faulkner tidhash_t *old_hash = NULL; 927c478bd9Sstevel@tonic-gate uint_t old_hashsz = 0; 936eb30ec3SRoger A. 
Faulkner ret_tidhash_t *ret_tidhash = NULL; 947c478bd9Sstevel@tonic-gate int i; 957c478bd9Sstevel@tonic-gate int rctlfail = 0; 969acbbeafSnn boolean_t branded = 0; 97575a7426Spt struct ctxop *ctx = NULL; 987c478bd9Sstevel@tonic-gate 997c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 1007c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 1017c478bd9Sstevel@tonic-gate /* 1027c478bd9Sstevel@tonic-gate * don't enforce rctl limits on system processes 1037c478bd9Sstevel@tonic-gate */ 1047c478bd9Sstevel@tonic-gate if (cid != syscid) { 1057c478bd9Sstevel@tonic-gate if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl) 1067c478bd9Sstevel@tonic-gate if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p, 1077c478bd9Sstevel@tonic-gate 1, 0) & RCT_DENY) 1087c478bd9Sstevel@tonic-gate rctlfail = 1; 1097c478bd9Sstevel@tonic-gate if (p->p_task->tk_proj->kpj_nlwps >= 1107c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps_ctl) 1117c478bd9Sstevel@tonic-gate if (rctl_test(rc_project_nlwps, 1127c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_rctls, p, 1, 0) 1137c478bd9Sstevel@tonic-gate & RCT_DENY) 1147c478bd9Sstevel@tonic-gate rctlfail = 1; 1157c478bd9Sstevel@tonic-gate if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl) 1167c478bd9Sstevel@tonic-gate if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p, 1177c478bd9Sstevel@tonic-gate 1, 0) & RCT_DENY) 1187c478bd9Sstevel@tonic-gate rctlfail = 1; 1197c478bd9Sstevel@tonic-gate } 1207c478bd9Sstevel@tonic-gate if (rctlfail) { 1217c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 1227c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 1237c478bd9Sstevel@tonic-gate return (NULL); 1247c478bd9Sstevel@tonic-gate } 1257c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps++; 1267c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps++; 1277c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps++; 1287c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 1297c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 
1307c478bd9Sstevel@tonic-gate 1317c478bd9Sstevel@tonic-gate if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0) 1327c478bd9Sstevel@tonic-gate stksize = lwp_default_stksize; 1337c478bd9Sstevel@tonic-gate 1347c478bd9Sstevel@tonic-gate /* 1357c478bd9Sstevel@tonic-gate * Try to reclaim a <lwp,stack> from 'deathrow' 1367c478bd9Sstevel@tonic-gate */ 1377c478bd9Sstevel@tonic-gate if (stksize == lwp_default_stksize) { 1387c478bd9Sstevel@tonic-gate if (lwp_reapcnt > 0) { 1397c478bd9Sstevel@tonic-gate mutex_enter(&reaplock); 1407c478bd9Sstevel@tonic-gate if ((t = lwp_deathrow) != NULL) { 1417c478bd9Sstevel@tonic-gate ASSERT(t->t_swap); 1427c478bd9Sstevel@tonic-gate lwp_deathrow = t->t_forw; 1437c478bd9Sstevel@tonic-gate lwp_reapcnt--; 1447c478bd9Sstevel@tonic-gate lwpdata = t->t_swap; 1457c478bd9Sstevel@tonic-gate lwp = t->t_lwp; 146575a7426Spt ctx = t->t_ctx; 1477c478bd9Sstevel@tonic-gate t->t_swap = NULL; 1487c478bd9Sstevel@tonic-gate t->t_lwp = NULL; 149575a7426Spt t->t_ctx = NULL; 150575a7426Spt reapq_move_lq_to_tq(t); 151575a7426Spt } 152575a7426Spt mutex_exit(&reaplock); 153575a7426Spt if (lwp != NULL) { 154575a7426Spt lwp_stk_fini(lwp); 155575a7426Spt } 156575a7426Spt if (ctx != NULL) { 157575a7426Spt freectx_ctx(ctx); 1587c478bd9Sstevel@tonic-gate } 1597c478bd9Sstevel@tonic-gate } 1607c478bd9Sstevel@tonic-gate if (lwpdata == NULL && 1617c478bd9Sstevel@tonic-gate (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) { 1627c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 1637c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 1647c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps--; 1657c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps--; 1667c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps--; 1677c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 1687c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 1697c478bd9Sstevel@tonic-gate return (NULL); 1707c478bd9Sstevel@tonic-gate } 1717c478bd9Sstevel@tonic-gate } else { 
1727c478bd9Sstevel@tonic-gate stksize = roundup(stksize, PAGESIZE); 1737c478bd9Sstevel@tonic-gate if ((lwpdata = (caddr_t)segkp_get(segkp, stksize, 1747c478bd9Sstevel@tonic-gate (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) { 1757c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 1767c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 1777c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps--; 1787c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps--; 1797c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps--; 1807c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 1817c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 1827c478bd9Sstevel@tonic-gate return (NULL); 1837c478bd9Sstevel@tonic-gate } 1847c478bd9Sstevel@tonic-gate } 1857c478bd9Sstevel@tonic-gate 1867c478bd9Sstevel@tonic-gate /* 1877c478bd9Sstevel@tonic-gate * Create a thread, initializing the stack pointer 1887c478bd9Sstevel@tonic-gate */ 1897c478bd9Sstevel@tonic-gate t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri); 1907c478bd9Sstevel@tonic-gate 1917c478bd9Sstevel@tonic-gate t->t_swap = lwpdata; /* Start of page-able data */ 1927c478bd9Sstevel@tonic-gate if (lwp == NULL) 1937c478bd9Sstevel@tonic-gate lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP); 1947c478bd9Sstevel@tonic-gate bzero(lwp, sizeof (*lwp)); 1957c478bd9Sstevel@tonic-gate t->t_lwp = lwp; 1967c478bd9Sstevel@tonic-gate 1977c478bd9Sstevel@tonic-gate t->t_hold = *smask; 1987c478bd9Sstevel@tonic-gate lwp->lwp_thread = t; 1997c478bd9Sstevel@tonic-gate lwp->lwp_procp = p; 2007c478bd9Sstevel@tonic-gate lwp->lwp_sigaltstack.ss_flags = SS_DISABLE; 2017c478bd9Sstevel@tonic-gate if (curlwp != NULL && curlwp->lwp_childstksz != 0) 2027c478bd9Sstevel@tonic-gate lwp->lwp_childstksz = curlwp->lwp_childstksz; 2037c478bd9Sstevel@tonic-gate 2047c478bd9Sstevel@tonic-gate t->t_stk = lwp_stk_init(lwp, t->t_stk); 2057c478bd9Sstevel@tonic-gate thread_load(t, proc, arg, len); 2067c478bd9Sstevel@tonic-gate 
2077c478bd9Sstevel@tonic-gate /* 2087c478bd9Sstevel@tonic-gate * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect. 2097c478bd9Sstevel@tonic-gate */ 210e0cf54a5SRoger A. Faulkner if (p->p_rprof_cyclic != CYCLIC_NONE) 2117c478bd9Sstevel@tonic-gate t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP); 2127c478bd9Sstevel@tonic-gate 2137c478bd9Sstevel@tonic-gate if (cid != NOCLASS) 2147c478bd9Sstevel@tonic-gate (void) CL_ALLOC(&bufp, cid, KM_SLEEP); 2157c478bd9Sstevel@tonic-gate 2167c478bd9Sstevel@tonic-gate /* 2177c478bd9Sstevel@tonic-gate * Allocate an lwp directory entry for the new lwp. 2187c478bd9Sstevel@tonic-gate */ 2197c478bd9Sstevel@tonic-gate lep = kmem_zalloc(sizeof (*lep), KM_SLEEP); 2207c478bd9Sstevel@tonic-gate 2217c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 2227c478bd9Sstevel@tonic-gate grow: 2237c478bd9Sstevel@tonic-gate /* 2247c478bd9Sstevel@tonic-gate * Grow the lwp (thread) directory and lwpid hash table if necessary. 2257c478bd9Sstevel@tonic-gate * A note on the growth algorithm: 2267c478bd9Sstevel@tonic-gate * The new lwp directory size is computed as: 2277c478bd9Sstevel@tonic-gate * new = 2 * old + 2 2287c478bd9Sstevel@tonic-gate * Starting with an initial size of 2 (see exec_common()), 2297c478bd9Sstevel@tonic-gate * this yields numbers that are a power of two minus 2: 2307c478bd9Sstevel@tonic-gate * 2, 6, 14, 30, 62, 126, 254, 510, 1022, ... 2317c478bd9Sstevel@tonic-gate * The size of the lwpid hash table must be a power of two 2327c478bd9Sstevel@tonic-gate * and must be commensurate in size with the lwp directory 2337c478bd9Sstevel@tonic-gate * so that hash bucket chains remain short. 
Therefore, 2347c478bd9Sstevel@tonic-gate * the lwpid hash table size is computed as: 2357c478bd9Sstevel@tonic-gate * hashsz = (dirsz + 2) / 2 2367c478bd9Sstevel@tonic-gate * which leads to these hash table sizes corresponding to 2377c478bd9Sstevel@tonic-gate * the above directory sizes: 2387c478bd9Sstevel@tonic-gate * 2, 4, 8, 16, 32, 64, 128, 256, 512, ... 2396eb30ec3SRoger A. Faulkner * A note on growing the hash table: 2406eb30ec3SRoger A. Faulkner * For performance reasons, code in lwp_unpark() does not 2416eb30ec3SRoger A. Faulkner * acquire curproc->p_lock when searching the hash table. 2426eb30ec3SRoger A. Faulkner * Rather, it calls lwp_hash_lookup_and_lock() which 2436eb30ec3SRoger A. Faulkner * acquires only the individual hash bucket lock, taking 2446eb30ec3SRoger A. Faulkner * care to deal with reallocation of the hash table 2456eb30ec3SRoger A. Faulkner * during the time it takes to acquire the lock. 2466eb30ec3SRoger A. Faulkner * 2476eb30ec3SRoger A. Faulkner * This is sufficient to protect the integrity of the 2486eb30ec3SRoger A. Faulkner * hash table, but it requires us to acquire all of the 2496eb30ec3SRoger A. Faulkner * old hash bucket locks before growing the hash table 2506eb30ec3SRoger A. Faulkner * and to release them afterwards. It also requires us 2516eb30ec3SRoger A. Faulkner * not to free the old hash table because some thread 2526eb30ec3SRoger A. Faulkner * in lwp_hash_lookup_and_lock() might still be trying 2536eb30ec3SRoger A. Faulkner * to acquire the old bucket lock. 2546eb30ec3SRoger A. Faulkner * 2556eb30ec3SRoger A. Faulkner * So we adopt the tactic of keeping all of the retired 2566eb30ec3SRoger A. Faulkner * hash tables on a linked list, so they can be safely 2576eb30ec3SRoger A. Faulkner * freed when the process exits or execs. 2586eb30ec3SRoger A. Faulkner * 2596eb30ec3SRoger A. Faulkner * Because the hash table grows in powers of two, the 2606eb30ec3SRoger A. 
Faulkner * total size of all of the hash tables will be slightly 2616eb30ec3SRoger A. Faulkner * less than twice the size of the largest hash table. 2627c478bd9Sstevel@tonic-gate */ 2637c478bd9Sstevel@tonic-gate while (p->p_lwpfree == NULL) { 2647c478bd9Sstevel@tonic-gate uint_t dirsz = p->p_lwpdir_sz; 2657c478bd9Sstevel@tonic-gate lwpdir_t *new_dir; 2666eb30ec3SRoger A. Faulkner uint_t new_dirsz; 2677c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 2686eb30ec3SRoger A. Faulkner tidhash_t *new_hash; 2696eb30ec3SRoger A. Faulkner uint_t new_hashsz; 2707c478bd9Sstevel@tonic-gate 2717c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 2727c478bd9Sstevel@tonic-gate 2736eb30ec3SRoger A. Faulkner /* 2746eb30ec3SRoger A. Faulkner * Prepare to remember the old p_tidhash for later 2756eb30ec3SRoger A. Faulkner * kmem_free()ing when the process exits or execs. 2766eb30ec3SRoger A. Faulkner */ 2776eb30ec3SRoger A. Faulkner if (ret_tidhash == NULL) 2786eb30ec3SRoger A. Faulkner ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t), 2796eb30ec3SRoger A. Faulkner KM_SLEEP); 2806eb30ec3SRoger A. Faulkner if (old_dir != NULL) 2817c478bd9Sstevel@tonic-gate kmem_free(old_dir, old_dirsz * sizeof (*old_dir)); 2826eb30ec3SRoger A. Faulkner if (old_hash != NULL) 2837c478bd9Sstevel@tonic-gate kmem_free(old_hash, old_hashsz * sizeof (*old_hash)); 2846eb30ec3SRoger A. Faulkner 2857c478bd9Sstevel@tonic-gate new_dirsz = 2 * dirsz + 2; 2867c478bd9Sstevel@tonic-gate new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP); 2877c478bd9Sstevel@tonic-gate for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++) 2887c478bd9Sstevel@tonic-gate ldp->ld_next = ldp + 1; 2897c478bd9Sstevel@tonic-gate new_hashsz = (new_dirsz + 2) / 2; 2906eb30ec3SRoger A. 
Faulkner new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t), 291575a7426Spt KM_SLEEP); 2927c478bd9Sstevel@tonic-gate 2937c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 2947c478bd9Sstevel@tonic-gate if (p == curproc) 2957c478bd9Sstevel@tonic-gate prbarrier(p); 2967c478bd9Sstevel@tonic-gate 2977c478bd9Sstevel@tonic-gate if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) { 2987c478bd9Sstevel@tonic-gate /* 2997c478bd9Sstevel@tonic-gate * Someone else beat us to it or some lwp exited. 3007c478bd9Sstevel@tonic-gate * Set up to free our memory and take a lap. 3017c478bd9Sstevel@tonic-gate */ 3027c478bd9Sstevel@tonic-gate old_dir = new_dir; 3037c478bd9Sstevel@tonic-gate old_dirsz = new_dirsz; 3047c478bd9Sstevel@tonic-gate old_hash = new_hash; 3057c478bd9Sstevel@tonic-gate old_hashsz = new_hashsz; 3067c478bd9Sstevel@tonic-gate } else { 3076eb30ec3SRoger A. Faulkner /* 3086eb30ec3SRoger A. Faulkner * For the benefit of lwp_hash_lookup_and_lock(), 3096eb30ec3SRoger A. Faulkner * called from lwp_unpark(), which searches the 3106eb30ec3SRoger A. Faulkner * tid hash table without acquiring p->p_lock, 3116eb30ec3SRoger A. Faulkner * we must acquire all of the tid hash table 3126eb30ec3SRoger A. Faulkner * locks before replacing p->p_tidhash. 3136eb30ec3SRoger A. Faulkner */ 3147c478bd9Sstevel@tonic-gate old_hash = p->p_tidhash; 3157c478bd9Sstevel@tonic-gate old_hashsz = p->p_tidhash_sz; 3166eb30ec3SRoger A. Faulkner for (i = 0; i < old_hashsz; i++) { 3176eb30ec3SRoger A. Faulkner mutex_enter(&old_hash[i].th_lock); 3186eb30ec3SRoger A. Faulkner mutex_enter(&new_hash[i].th_lock); 3196eb30ec3SRoger A. Faulkner } 3206eb30ec3SRoger A. Faulkner 3217c478bd9Sstevel@tonic-gate /* 3227c478bd9Sstevel@tonic-gate * We simply hash in all of the old directory entries. 3237c478bd9Sstevel@tonic-gate * This works because the old directory has no empty 3247c478bd9Sstevel@tonic-gate * slots and the new hash table starts out empty. 
3257c478bd9Sstevel@tonic-gate * This reproduces the original directory ordering 3267c478bd9Sstevel@tonic-gate * (required for /proc directory semantics). 3277c478bd9Sstevel@tonic-gate */ 3286eb30ec3SRoger A. Faulkner old_dir = p->p_lwpdir; 3296eb30ec3SRoger A. Faulkner old_dirsz = p->p_lwpdir_sz; 3306eb30ec3SRoger A. Faulkner p->p_lwpdir = new_dir; 3316eb30ec3SRoger A. Faulkner p->p_lwpfree = new_dir; 3326eb30ec3SRoger A. Faulkner p->p_lwpdir_sz = new_dirsz; 3336eb30ec3SRoger A. Faulkner for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++) 3346eb30ec3SRoger A. Faulkner lwp_hash_in(p, ldp->ld_entry, 3356eb30ec3SRoger A. Faulkner new_hash, new_hashsz, 0); 3366eb30ec3SRoger A. Faulkner 3376eb30ec3SRoger A. Faulkner /* 3386eb30ec3SRoger A. Faulkner * Remember the old hash table along with all 3396eb30ec3SRoger A. Faulkner * of the previously-remembered hash tables. 3406eb30ec3SRoger A. Faulkner * We will free them at process exit or exec. 3416eb30ec3SRoger A. Faulkner */ 3426eb30ec3SRoger A. Faulkner ret_tidhash->rth_tidhash = old_hash; 3436eb30ec3SRoger A. Faulkner ret_tidhash->rth_tidhash_sz = old_hashsz; 3446eb30ec3SRoger A. Faulkner ret_tidhash->rth_next = p->p_ret_tidhash; 3456eb30ec3SRoger A. Faulkner p->p_ret_tidhash = ret_tidhash; 3466eb30ec3SRoger A. Faulkner 3477c478bd9Sstevel@tonic-gate /* 3486eb30ec3SRoger A. Faulkner * Now establish the new tid hash table. 3496eb30ec3SRoger A. Faulkner * As soon as we assign p->p_tidhash, 3506eb30ec3SRoger A. Faulkner * code in lwp_unpark() can start using it. 3517c478bd9Sstevel@tonic-gate */ 3526eb30ec3SRoger A. Faulkner membar_producer(); 3536eb30ec3SRoger A. Faulkner p->p_tidhash = new_hash; 3546eb30ec3SRoger A. Faulkner 3556eb30ec3SRoger A. Faulkner /* 3566eb30ec3SRoger A. Faulkner * It is necessary that p_tidhash reach global 3576eb30ec3SRoger A. Faulkner * visibility before p_tidhash_sz. Otherwise, 3586eb30ec3SRoger A. Faulkner * code in lwp_hash_lookup_and_lock() could 3596eb30ec3SRoger A. 
Faulkner * index into the old p_tidhash using the new 3606eb30ec3SRoger A. Faulkner * p_tidhash_sz and thereby access invalid data. 3616eb30ec3SRoger A. Faulkner */ 3626eb30ec3SRoger A. Faulkner membar_producer(); 3636eb30ec3SRoger A. Faulkner p->p_tidhash_sz = new_hashsz; 3646eb30ec3SRoger A. Faulkner 3656eb30ec3SRoger A. Faulkner /* 3666eb30ec3SRoger A. Faulkner * Release the locks; allow lwp_unpark() to carry on. 3676eb30ec3SRoger A. Faulkner */ 3686eb30ec3SRoger A. Faulkner for (i = 0; i < old_hashsz; i++) { 3696eb30ec3SRoger A. Faulkner mutex_exit(&old_hash[i].th_lock); 3706eb30ec3SRoger A. Faulkner mutex_exit(&new_hash[i].th_lock); 3716eb30ec3SRoger A. Faulkner } 3726eb30ec3SRoger A. Faulkner 3736eb30ec3SRoger A. Faulkner /* 3746eb30ec3SRoger A. Faulkner * Avoid freeing these objects below. 3756eb30ec3SRoger A. Faulkner */ 3766eb30ec3SRoger A. Faulkner ret_tidhash = NULL; 3776eb30ec3SRoger A. Faulkner old_hash = NULL; 3786eb30ec3SRoger A. Faulkner old_hashsz = 0; 3797c478bd9Sstevel@tonic-gate } 3807c478bd9Sstevel@tonic-gate } 3817c478bd9Sstevel@tonic-gate 3827c478bd9Sstevel@tonic-gate /* 3837c478bd9Sstevel@tonic-gate * Block the process against /proc while we manipulate p->p_tlist, 3847c478bd9Sstevel@tonic-gate * unless lwp_create() was called by /proc for the PCAGENT operation. 3857c478bd9Sstevel@tonic-gate * We want to do this early enough so that we don't drop p->p_lock 3867c478bd9Sstevel@tonic-gate * until the thread is put on the p->p_tlist. 3877c478bd9Sstevel@tonic-gate */ 3887c478bd9Sstevel@tonic-gate if (p == curproc) { 3897c478bd9Sstevel@tonic-gate prbarrier(p); 3907c478bd9Sstevel@tonic-gate /* 3917c478bd9Sstevel@tonic-gate * If the current lwp has been requested to stop, do so now. 3927c478bd9Sstevel@tonic-gate * Otherwise we have a race condition between /proc attempting 3937c478bd9Sstevel@tonic-gate * to stop the process and this thread creating a new lwp 3947c478bd9Sstevel@tonic-gate * that was not seen when the /proc PCSTOP request was issued. 
3957c478bd9Sstevel@tonic-gate * We rely on stop() to call prbarrier(p) before returning. 3967c478bd9Sstevel@tonic-gate */ 3977c478bd9Sstevel@tonic-gate while ((curthread->t_proc_flag & TP_PRSTOP) && 398*936e3a33SGangadhar Mylapuram !ttolwp(curthread)->lwp_nostop) { 399*936e3a33SGangadhar Mylapuram /* 400*936e3a33SGangadhar Mylapuram * We called pool_barrier_enter() before calling 401*936e3a33SGangadhar Mylapuram * here to lwp_create(). We have to call 402*936e3a33SGangadhar Mylapuram * pool_barrier_exit() before stopping. 403*936e3a33SGangadhar Mylapuram */ 404*936e3a33SGangadhar Mylapuram pool_barrier_exit(); 405*936e3a33SGangadhar Mylapuram prbarrier(p); 4067c478bd9Sstevel@tonic-gate stop(PR_REQUESTED, 0); 407*936e3a33SGangadhar Mylapuram /* 408*936e3a33SGangadhar Mylapuram * And we have to repeat the call to 409*936e3a33SGangadhar Mylapuram * pool_barrier_enter after stopping. 410*936e3a33SGangadhar Mylapuram */ 411*936e3a33SGangadhar Mylapuram pool_barrier_enter(); 412*936e3a33SGangadhar Mylapuram prbarrier(p); 413*936e3a33SGangadhar Mylapuram } 4147c478bd9Sstevel@tonic-gate 4157c478bd9Sstevel@tonic-gate /* 4167c478bd9Sstevel@tonic-gate * If process is exiting, there could be a race between 4177c478bd9Sstevel@tonic-gate * the agent lwp creation and the new lwp currently being 4187c478bd9Sstevel@tonic-gate * created. So to prevent this race lwp creation is failed 4197c478bd9Sstevel@tonic-gate * if the process is exiting. 4207c478bd9Sstevel@tonic-gate */ 4217c478bd9Sstevel@tonic-gate if (p->p_flag & (SEXITLWPS|SKILLED)) { 4227c478bd9Sstevel@tonic-gate err = 1; 4237c478bd9Sstevel@tonic-gate goto error; 4247c478bd9Sstevel@tonic-gate } 4257c478bd9Sstevel@tonic-gate 4267c478bd9Sstevel@tonic-gate /* 4277c478bd9Sstevel@tonic-gate * Since we might have dropped p->p_lock, the 4287c478bd9Sstevel@tonic-gate * lwp directory free list might have changed. 
4297c478bd9Sstevel@tonic-gate */ 4307c478bd9Sstevel@tonic-gate if (p->p_lwpfree == NULL) 4317c478bd9Sstevel@tonic-gate goto grow; 4327c478bd9Sstevel@tonic-gate } 4337c478bd9Sstevel@tonic-gate 4347c478bd9Sstevel@tonic-gate kpreempt_disable(); /* can't grab cpu_lock here */ 4357c478bd9Sstevel@tonic-gate 4367c478bd9Sstevel@tonic-gate /* 4377c478bd9Sstevel@tonic-gate * Inherit processor and processor set bindings from curthread, 4387c478bd9Sstevel@tonic-gate * unless we're creating a new kernel process, in which case 4397c478bd9Sstevel@tonic-gate * clear all bindings. 4407c478bd9Sstevel@tonic-gate */ 4417c478bd9Sstevel@tonic-gate if (cid == syscid) { 4427c478bd9Sstevel@tonic-gate t->t_bind_cpu = binding = PBIND_NONE; 4437c478bd9Sstevel@tonic-gate t->t_cpupart = oldpart = &cp_default; 4447c478bd9Sstevel@tonic-gate t->t_bind_pset = PS_NONE; 4450b70c467Sakolb t->t_bindflag = (uchar_t)default_binding_mode; 4467c478bd9Sstevel@tonic-gate } else { 4477c478bd9Sstevel@tonic-gate binding = curthread->t_bind_cpu; 4487c478bd9Sstevel@tonic-gate t->t_bind_cpu = binding; 4497c478bd9Sstevel@tonic-gate oldpart = t->t_cpupart; 4507c478bd9Sstevel@tonic-gate t->t_cpupart = curthread->t_cpupart; 4517c478bd9Sstevel@tonic-gate t->t_bind_pset = curthread->t_bind_pset; 4520b70c467Sakolb t->t_bindflag = curthread->t_bindflag | 4530b70c467Sakolb (uchar_t)default_binding_mode; 4547c478bd9Sstevel@tonic-gate } 4557c478bd9Sstevel@tonic-gate 4567c478bd9Sstevel@tonic-gate /* 4577c478bd9Sstevel@tonic-gate * thread_create() initializes this thread's home lgroup to the root. 4587c478bd9Sstevel@tonic-gate * Choose a more suitable lgroup, since this thread is associated 4597c478bd9Sstevel@tonic-gate * with an lwp. 
4607c478bd9Sstevel@tonic-gate */ 4617c478bd9Sstevel@tonic-gate ASSERT(oldpart != NULL); 4627c478bd9Sstevel@tonic-gate if (binding != PBIND_NONE && t->t_affinitycnt == 0) { 4637c478bd9Sstevel@tonic-gate t->t_bound_cpu = cpu[binding]; 4647c478bd9Sstevel@tonic-gate if (t->t_lpl != t->t_bound_cpu->cpu_lpl) 4657c478bd9Sstevel@tonic-gate lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1); 4667c478bd9Sstevel@tonic-gate } else { 4677c478bd9Sstevel@tonic-gate lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1); 4687c478bd9Sstevel@tonic-gate } 4697c478bd9Sstevel@tonic-gate 4707c478bd9Sstevel@tonic-gate kpreempt_enable(); 4717c478bd9Sstevel@tonic-gate 4727c478bd9Sstevel@tonic-gate /* 4737c478bd9Sstevel@tonic-gate * make sure lpl points to our own partition 4747c478bd9Sstevel@tonic-gate */ 4757c478bd9Sstevel@tonic-gate ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads); 4767c478bd9Sstevel@tonic-gate ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads + 4777c478bd9Sstevel@tonic-gate t->t_cpupart->cp_nlgrploads); 4787c478bd9Sstevel@tonic-gate 4797c478bd9Sstevel@tonic-gate /* 4807c478bd9Sstevel@tonic-gate * If we're creating a new process, then inherit the project from our 4817c478bd9Sstevel@tonic-gate * parent. If we're only creating an additional lwp then use the 4827c478bd9Sstevel@tonic-gate * project pointer of the target process. 4837c478bd9Sstevel@tonic-gate */ 4847c478bd9Sstevel@tonic-gate if (p->p_task == NULL) 4857c478bd9Sstevel@tonic-gate newkpj = ttoproj(curthread); 4867c478bd9Sstevel@tonic-gate else 4877c478bd9Sstevel@tonic-gate newkpj = p->p_task->tk_proj; 4887c478bd9Sstevel@tonic-gate 4897c478bd9Sstevel@tonic-gate /* 4907c478bd9Sstevel@tonic-gate * It is safe to point the thread to the new project without holding it 4917c478bd9Sstevel@tonic-gate * since we're holding the target process' p_lock here and therefore 4927c478bd9Sstevel@tonic-gate * we're guaranteed that it will not move to another project. 
4937c478bd9Sstevel@tonic-gate */ 4947c478bd9Sstevel@tonic-gate oldkpj = ttoproj(t); 4957c478bd9Sstevel@tonic-gate if (newkpj != oldkpj) { 4967c478bd9Sstevel@tonic-gate t->t_proj = newkpj; 4977c478bd9Sstevel@tonic-gate (void) project_hold(newkpj); 4987c478bd9Sstevel@tonic-gate project_rele(oldkpj); 4997c478bd9Sstevel@tonic-gate } 5007c478bd9Sstevel@tonic-gate 5017c478bd9Sstevel@tonic-gate if (cid != NOCLASS) { 5027c478bd9Sstevel@tonic-gate /* 5037c478bd9Sstevel@tonic-gate * If the lwp is being created in the current process 5047c478bd9Sstevel@tonic-gate * and matches the current thread's scheduling class, 5057c478bd9Sstevel@tonic-gate * we should propagate the current thread's scheduling 5067c478bd9Sstevel@tonic-gate * parameters by calling CL_FORK. Otherwise just use 5077c478bd9Sstevel@tonic-gate * the defaults by calling CL_ENTERCLASS. 5087c478bd9Sstevel@tonic-gate */ 5097c478bd9Sstevel@tonic-gate if (p != curproc || curthread->t_cid != cid) { 5107c478bd9Sstevel@tonic-gate err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp); 5117c478bd9Sstevel@tonic-gate t->t_pri = pri; /* CL_ENTERCLASS may have changed it */ 512d4204c85Sraf /* 513d4204c85Sraf * We don't call schedctl_set_cidpri(t) here 514d4204c85Sraf * because the schedctl data is not yet set 515d4204c85Sraf * up for the newly-created lwp. 516d4204c85Sraf */ 5177c478bd9Sstevel@tonic-gate } else { 5187c478bd9Sstevel@tonic-gate t->t_clfuncs = &(sclass[cid].cl_funcs->thread); 5197c478bd9Sstevel@tonic-gate err = CL_FORK(curthread, t, bufp); 5207c478bd9Sstevel@tonic-gate t->t_cid = cid; 5217c478bd9Sstevel@tonic-gate } 5227c478bd9Sstevel@tonic-gate if (err) 5237c478bd9Sstevel@tonic-gate goto error; 5247c478bd9Sstevel@tonic-gate else 5257c478bd9Sstevel@tonic-gate bufp = NULL; 5267c478bd9Sstevel@tonic-gate } 5277c478bd9Sstevel@tonic-gate 5287c478bd9Sstevel@tonic-gate /* 5297c478bd9Sstevel@tonic-gate * If we were given an lwpid then use it, else allocate one. 
5307c478bd9Sstevel@tonic-gate */ 5317c478bd9Sstevel@tonic-gate if (lwpid != 0) 5327c478bd9Sstevel@tonic-gate t->t_tid = lwpid; 5337c478bd9Sstevel@tonic-gate else { 5347c478bd9Sstevel@tonic-gate /* 5357c478bd9Sstevel@tonic-gate * lwp/thread id 0 is never valid; reserved for special checks. 5367c478bd9Sstevel@tonic-gate * lwp/thread id 1 is reserved for the main thread. 5377c478bd9Sstevel@tonic-gate * Start again at 2 when INT_MAX has been reached 5387c478bd9Sstevel@tonic-gate * (id_t is a signed 32-bit integer). 5397c478bd9Sstevel@tonic-gate */ 5407c478bd9Sstevel@tonic-gate id_t prev_id = p->p_lwpid; /* last allocated tid */ 5417c478bd9Sstevel@tonic-gate 5427c478bd9Sstevel@tonic-gate do { /* avoid lwpid duplication */ 5437c478bd9Sstevel@tonic-gate if (p->p_lwpid == INT_MAX) { 5447c478bd9Sstevel@tonic-gate p->p_flag |= SLWPWRAP; 5457c478bd9Sstevel@tonic-gate p->p_lwpid = 1; 5467c478bd9Sstevel@tonic-gate } 5477c478bd9Sstevel@tonic-gate if ((t->t_tid = ++p->p_lwpid) == prev_id) { 5487c478bd9Sstevel@tonic-gate /* 5497c478bd9Sstevel@tonic-gate * All lwpids are allocated; fail the request. 5507c478bd9Sstevel@tonic-gate */ 5517c478bd9Sstevel@tonic-gate err = 1; 5527c478bd9Sstevel@tonic-gate goto error; 5537c478bd9Sstevel@tonic-gate } 5547c478bd9Sstevel@tonic-gate /* 5557c478bd9Sstevel@tonic-gate * We only need to worry about colliding with an id 5567c478bd9Sstevel@tonic-gate * that's already in use if this process has 5577c478bd9Sstevel@tonic-gate * cycled through all available lwp ids. 5587c478bd9Sstevel@tonic-gate */ 5597c478bd9Sstevel@tonic-gate if ((p->p_flag & SLWPWRAP) == 0) 5607c478bd9Sstevel@tonic-gate break; 5617c478bd9Sstevel@tonic-gate } while (lwp_hash_lookup(p, t->t_tid) != NULL); 5627c478bd9Sstevel@tonic-gate } 5639acbbeafSnn 5649acbbeafSnn /* 5659acbbeafSnn * If this is a branded process, let the brand do any necessary lwp 5669acbbeafSnn * initialization. 
5679acbbeafSnn */ 5689acbbeafSnn if (PROC_IS_BRANDED(p)) { 5699acbbeafSnn if (BROP(p)->b_initlwp(lwp)) { 5709acbbeafSnn err = 1; 5719acbbeafSnn goto error; 5729acbbeafSnn } 5739acbbeafSnn branded = 1; 5749acbbeafSnn } 5759acbbeafSnn 5762cb27123Saguzovsk if (t->t_tid == 1) { 5772cb27123Saguzovsk kpreempt_disable(); 5782cb27123Saguzovsk ASSERT(t->t_lpl != NULL); 5792cb27123Saguzovsk p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid; 5802cb27123Saguzovsk kpreempt_enable(); 5812cb27123Saguzovsk if (p->p_tr_lgrpid != LGRP_NONE && 5822cb27123Saguzovsk p->p_tr_lgrpid != p->p_t1_lgrpid) { 5832cb27123Saguzovsk lgrp_update_trthr_migrations(1); 5842cb27123Saguzovsk } 5852cb27123Saguzovsk } 5862cb27123Saguzovsk 5877c478bd9Sstevel@tonic-gate p->p_lwpcnt++; 5887c478bd9Sstevel@tonic-gate t->t_waitfor = -1; 5897c478bd9Sstevel@tonic-gate 5907c478bd9Sstevel@tonic-gate /* 5917c478bd9Sstevel@tonic-gate * Turn microstate accounting on for thread if on for process. 5927c478bd9Sstevel@tonic-gate */ 5937c478bd9Sstevel@tonic-gate if (p->p_flag & SMSACCT) 5947c478bd9Sstevel@tonic-gate t->t_proc_flag |= TP_MSACCT; 5957c478bd9Sstevel@tonic-gate 5967c478bd9Sstevel@tonic-gate /* 5977c478bd9Sstevel@tonic-gate * If the process has watchpoints, mark the new thread as such. 5987c478bd9Sstevel@tonic-gate */ 5997c478bd9Sstevel@tonic-gate if (pr_watch_active(p)) 6007c478bd9Sstevel@tonic-gate watch_enable(t); 6017c478bd9Sstevel@tonic-gate 6027c478bd9Sstevel@tonic-gate /* 6037c478bd9Sstevel@tonic-gate * The lwp is being created in the stopped state. 6047c478bd9Sstevel@tonic-gate * We set all the necessary flags to indicate that fact here. 6057c478bd9Sstevel@tonic-gate * We omit the TS_CREATE flag from t_schedflag so that the lwp 6067c478bd9Sstevel@tonic-gate * cannot be set running until the caller is finished with it, 6077c478bd9Sstevel@tonic-gate * even if lwp_continue() is called on it after we drop p->p_lock. 
6087c478bd9Sstevel@tonic-gate * When the caller is finished with the newly-created lwp, 6097c478bd9Sstevel@tonic-gate * the caller must call lwp_create_done() to allow the lwp 6107c478bd9Sstevel@tonic-gate * to be set running. If the TP_HOLDLWP is left set, the 6117c478bd9Sstevel@tonic-gate * lwp will suspend itself after reaching system call exit. 6127c478bd9Sstevel@tonic-gate */ 6137c478bd9Sstevel@tonic-gate init_mstate(t, LMS_STOPPED); 6147c478bd9Sstevel@tonic-gate t->t_proc_flag |= TP_HOLDLWP; 6157c478bd9Sstevel@tonic-gate t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE)); 6167c478bd9Sstevel@tonic-gate t->t_whystop = PR_SUSPENDED; 6177c478bd9Sstevel@tonic-gate t->t_whatstop = SUSPEND_NORMAL; 6187c478bd9Sstevel@tonic-gate t->t_sig_check = 1; /* ensure that TP_HOLDLWP is honored */ 6197c478bd9Sstevel@tonic-gate 6207c478bd9Sstevel@tonic-gate /* 6217c478bd9Sstevel@tonic-gate * Set system call processing flags in case tracing or profiling 6227c478bd9Sstevel@tonic-gate * is set. The first system call will evaluate these and turn 6237c478bd9Sstevel@tonic-gate * them off if they aren't needed. 6247c478bd9Sstevel@tonic-gate */ 6257c478bd9Sstevel@tonic-gate t->t_pre_sys = 1; 6267c478bd9Sstevel@tonic-gate t->t_post_sys = 1; 6277c478bd9Sstevel@tonic-gate 6287c478bd9Sstevel@tonic-gate /* 6297c478bd9Sstevel@tonic-gate * Insert the new thread into the list of all threads. 
6307c478bd9Sstevel@tonic-gate */ 6317c478bd9Sstevel@tonic-gate if ((tx = p->p_tlist) == NULL) { 6327c478bd9Sstevel@tonic-gate t->t_back = t; 6337c478bd9Sstevel@tonic-gate t->t_forw = t; 6347c478bd9Sstevel@tonic-gate p->p_tlist = t; 6357c478bd9Sstevel@tonic-gate } else { 6367c478bd9Sstevel@tonic-gate t->t_forw = tx; 6377c478bd9Sstevel@tonic-gate t->t_back = tx->t_back; 6387c478bd9Sstevel@tonic-gate tx->t_back->t_forw = t; 6397c478bd9Sstevel@tonic-gate tx->t_back = t; 6407c478bd9Sstevel@tonic-gate } 6417c478bd9Sstevel@tonic-gate 6427c478bd9Sstevel@tonic-gate /* 6437c478bd9Sstevel@tonic-gate * Insert the new lwp into an lwp directory slot position 6447c478bd9Sstevel@tonic-gate * and into the lwpid hash table. 6457c478bd9Sstevel@tonic-gate */ 6467c478bd9Sstevel@tonic-gate lep->le_thread = t; 6477c478bd9Sstevel@tonic-gate lep->le_lwpid = t->t_tid; 6487c478bd9Sstevel@tonic-gate lep->le_start = t->t_start; 6496eb30ec3SRoger A. Faulkner lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1); 6507c478bd9Sstevel@tonic-gate 6517c478bd9Sstevel@tonic-gate if (state == TS_RUN) { 6527c478bd9Sstevel@tonic-gate /* 6537c478bd9Sstevel@tonic-gate * We set the new lwp running immediately. 6547c478bd9Sstevel@tonic-gate */ 6557c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_HOLDLWP; 6567c478bd9Sstevel@tonic-gate lwp_create_done(t); 6577c478bd9Sstevel@tonic-gate } 6587c478bd9Sstevel@tonic-gate 6597c478bd9Sstevel@tonic-gate error: 6607c478bd9Sstevel@tonic-gate if (err) { 6617c478bd9Sstevel@tonic-gate /* 6627c478bd9Sstevel@tonic-gate * We have failed to create an lwp, so decrement the number 6637c478bd9Sstevel@tonic-gate * of lwps in the task and let the lgroup load averages know 6647c478bd9Sstevel@tonic-gate * that this thread isn't going to show up. 
6657c478bd9Sstevel@tonic-gate */ 6667c478bd9Sstevel@tonic-gate kpreempt_disable(); 6677c478bd9Sstevel@tonic-gate lgrp_move_thread(t, NULL, 1); 6687c478bd9Sstevel@tonic-gate kpreempt_enable(); 6697c478bd9Sstevel@tonic-gate 6707c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 6717c478bd9Sstevel@tonic-gate mutex_enter(&p->p_zone->zone_nlwps_lock); 6727c478bd9Sstevel@tonic-gate p->p_task->tk_nlwps--; 6737c478bd9Sstevel@tonic-gate p->p_task->tk_proj->kpj_nlwps--; 6747c478bd9Sstevel@tonic-gate p->p_zone->zone_nlwps--; 6757c478bd9Sstevel@tonic-gate mutex_exit(&p->p_zone->zone_nlwps_lock); 6767c478bd9Sstevel@tonic-gate if (cid != NOCLASS && bufp != NULL) 6777c478bd9Sstevel@tonic-gate CL_FREE(cid, bufp); 6787c478bd9Sstevel@tonic-gate 6799acbbeafSnn if (branded) 6809acbbeafSnn BROP(p)->b_freelwp(lwp); 6819acbbeafSnn 6827c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 6837c478bd9Sstevel@tonic-gate t->t_state = TS_FREE; 6847c478bd9Sstevel@tonic-gate thread_rele(t); 6857c478bd9Sstevel@tonic-gate 6867c478bd9Sstevel@tonic-gate /* 6877c478bd9Sstevel@tonic-gate * We need to remove t from the list of all threads 6887c478bd9Sstevel@tonic-gate * because thread_exit()/lwp_exit() isn't called on t. 6897c478bd9Sstevel@tonic-gate */ 6907c478bd9Sstevel@tonic-gate mutex_enter(&pidlock); 6917c478bd9Sstevel@tonic-gate ASSERT(t != t->t_next); /* t0 never exits */ 6927c478bd9Sstevel@tonic-gate t->t_next->t_prev = t->t_prev; 6937c478bd9Sstevel@tonic-gate t->t_prev->t_next = t->t_next; 6947c478bd9Sstevel@tonic-gate mutex_exit(&pidlock); 6957c478bd9Sstevel@tonic-gate 6967c478bd9Sstevel@tonic-gate thread_free(t); 6977c478bd9Sstevel@tonic-gate kmem_free(lep, sizeof (*lep)); 6987c478bd9Sstevel@tonic-gate lwp = NULL; 6997c478bd9Sstevel@tonic-gate } else { 7007c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 7017c478bd9Sstevel@tonic-gate } 7027c478bd9Sstevel@tonic-gate 7036eb30ec3SRoger A. 
	/*
	 * Free any old lwp directory / tid hash storage that was replaced
	 * earlier (presumably while growing the lwp directory — the
	 * allocation site is in the body of lwp_create() above; TODO confirm).
	 * These frees are done after p_lock has been dropped.
	 */
	if (old_dir != NULL)
		kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
	if (old_hash != NULL)
		kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
	if (ret_tidhash != NULL)
		kmem_free(ret_tidhash, sizeof (ret_tidhash_t));

	DTRACE_PROC1(lwp__create, kthread_t *, t);
	/* lwp is NULL here if creation failed (see the error path above). */
	return (lwp);
}

/*
 * lwp_create_done() is called by the caller of lwp_create() to set the
 * newly-created lwp running after the caller has finished manipulating it.
 * The caller must hold p->p_lock; the target lwp must still be stopped
 * with TS_CREATE clear (the state lwp_create() left it in).
 */
void
lwp_create_done(kthread_t *t)
{
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
	 * (The absence of the TS_CREATE flag prevents the lwp from running
	 * until we are finished with it, even if lwp_continue() is called on
	 * it by some other lwp in the process or elsewhere in the kernel.)
	 */
	thread_lock(t);
	ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
	/*
	 * If TS_CSTART is set, lwp_continue(t) has been called and
	 * has already incremented p_lwprcnt; avoid doing this twice.
	 */
	if (!(t->t_schedflag & TS_CSTART))
		p->p_lwprcnt++;
	t->t_schedflag |= (TS_CSTART | TS_CREATE);
	setrun_locked(t);
	thread_unlock(t);
}

/*
 * Copy an LWP's active templates, and clear the latest contracts.
 * Each active template is duplicated with ctmpl_dup(); the destination's
 * "latest contract" slots are nulled rather than copied.
 */
void
lwp_ctmpl_copy(klwp_t *dst, klwp_t *src)
{
	int i;

	for (i = 0; i < ct_ntypes; i++) {
		dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]);
		dst->lwp_ct_latest[i] = NULL;
	}
}

/*
 * Clear an LWP's contract template state.
 */
void
lwp_ctmpl_clear(klwp_t *lwp)
{
	ct_template_t *tmpl;
	int i;

	/*
	 * For each contract type, free the active template (if any) and
	 * drop our hold on the latest contract (if any), nulling both slots.
	 */
	for (i = 0; i < ct_ntypes; i++) {
		if ((tmpl = lwp->lwp_ct_active[i]) != NULL) {
			ctmpl_free(tmpl);
			lwp->lwp_ct_active[i] = NULL;
		}

		if (lwp->lwp_ct_latest[i] != NULL) {
			contract_rele(lwp->lwp_ct_latest[i]);
			lwp->lwp_ct_latest[i] = NULL;
		}
	}
}

/*
 * Individual lwp exit.
 * If this is the last lwp, exit the whole process.
 * Called with p->p_lock held (and asserted); does not return —
 * the thread ultimately switches away as a zombie via swtch_from_zombie().
 */
void
lwp_exit(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	mutex_exit(&p->p_lock);

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully abandoned.
	 */
	trash_user_windows();
#endif

	tsd_exit();		/* free thread specific data */

	kcpc_passivate();	/* Clean up performance counter state */

	pollcleanup();

	if (t->t_door)
		door_slam();

	if (t->t_schedctl != NULL)
		schedctl_lwp_cleanup(t);

	if (t->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Perform any brand specific exit processing, then release any
	 * brand data associated with the lwp
	 */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_lwpexit(lwp);

	mutex_enter(&p->p_lock);
	lwp_cleanup();

	/*
	 * When this process is dumping core, its lwps are held here
	 * until the core dump is finished. Then exitlwps() is called
	 * again to release these lwps so that they can finish exiting.
	 */
	if (p->p_flag & SCOREDUMP)
		stop(PR_SUSPENDED, SUSPEND_NORMAL);

	/*
	 * Block the process against /proc now that we have really acquired
	 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
	 */
	prbarrier(p);

	/*
	 * Call proc_exit() if this is the last non-daemon lwp in the process.
	 */
	if (!(t->t_proc_flag & TP_DAEMON) &&
	    p->p_lwpcnt == p->p_lwpdaemon + 1) {
		mutex_exit(&p->p_lock);
		if (proc_exit(CLD_EXITED, 0) == 0) {
			/* Restarting init. */
			return;
		}

		/*
		 * proc_exit() returns a non-zero value when some other
		 * lwp got there first.  We just have to continue in
		 * lwp_exit().
		 */
		mutex_enter(&p->p_lock);
		ASSERT(curproc->p_flag & SEXITLWPS);
		prbarrier(p);
	}

	DTRACE_PROC(lwp__exit);

	/*
	 * If the lwp is a detached lwp or if the process is exiting,
	 * remove (lwp_hash_out()) the lwp from the lwp directory.
	 * Otherwise null out the lwp's le_thread pointer in the lwp
	 * directory so that other threads will see it as a zombie lwp.
	 */
	prlwpexit(t);		/* notify /proc */
	if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
		lwp_hash_out(p, t->t_tid);
	else {
		ASSERT(!(t->t_proc_flag & TP_DAEMON));
		p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
		p->p_zombcnt++;
		cv_broadcast(&p->p_lwpexit);
	}
	if (t->t_proc_flag & TP_DAEMON) {
		p->p_lwpdaemon--;
		t->t_proc_flag &= ~TP_DAEMON;
	}
	t->t_proc_flag &= ~TP_TWAIT;

	/*
	 * Maintain accurate lwp count for task.max-lwps resource control.
	 */
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	p->p_task->tk_nlwps--;
	p->p_task->tk_proj->kpj_nlwps--;
	p->p_zone->zone_nlwps--;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	CL_EXIT(t);		/* tell the scheduler that t is exiting */
	ASSERT(p->p_lwpcnt != 0);
	p->p_lwpcnt--;

	/*
	 * If all remaining non-daemon lwps are waiting in lwp_wait(),
	 * wake them up so someone can return EDEADLK.
	 * (See the block comment preceding lwp_wait().)
	 */
	if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
		cv_broadcast(&p->p_lwpexit);

	t->t_proc_flag |= TP_LWPEXIT;
	term_mstate(t);

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	/* Unlink t from the process's circular thread list. */
	t->t_forw->t_back = t->t_back;
	t->t_back->t_forw = t->t_forw;
	if (t == p->p_tlist)
		p->p_tlist = t->t_forw;

	/*
	 * Clean up the signal state.
	 */
	if (t->t_sigqueue != NULL)
		sigdelq(p, t, 0);
	if (lwp->lwp_curinfo != NULL) {
		siginfofree(lwp->lwp_curinfo);
		lwp->lwp_curinfo = NULL;
	}

	thread_rele(t);

	/*
	 * Terminated lwps are associated with process zero and are put onto
	 * death-row by resume().  Avoid preemption after resetting t->t_procp.
	 */
	t->t_preempt++;

	if (t->t_ctx != NULL)
		exitctx(t);
	if (p->p_pctx != NULL)
		exitpctx(p);

	t->t_procp = &p0;

	/*
	 * Notify the HAT about the change of address space
	 */
	hat_thread_exit(t);
	/*
	 * When this is the last running lwp in this process and some lwp is
	 * waiting for this condition to become true, or this thread was being
	 * suspended, then the waiting lwp is awakened.
	 *
	 * Also, if the process is exiting, we may have a thread waiting in
	 * exitlwps() that needs to be notified.
	 */
	if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
	    (p->p_flag & SEXITLWPS))
		cv_broadcast(&p->p_holdlwps);

	/*
	 * Need to drop p_lock so we can reacquire pidlock.
	 */
	mutex_exit(&p->p_lock);
	mutex_enter(&pidlock);

	ASSERT(t != t->t_next);		/* t0 never exits */
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	lwp_pcb_exit();

	t->t_state = TS_ZOMB;
	swtch_from_zombie();
	/* never returns */
}


/*
 * Cleanup function for an exiting lwp.
 * Called both from lwp_exit() and from proc_exit().
 * p->p_lock is repeatedly released and grabbed in this function.
 */
void
lwp_cleanup(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/* untimeout any lwp-bound realtime timers */
	if (p->p_itimer != NULL)
		timer_lwpexit();

	/*
	 * If this is the /proc agent lwp that is exiting, readjust p_lwpid
	 * so it appears that the agent never existed, and clear p_agenttp.
 */
	if (t == p->p_agenttp) {
		ASSERT(t->t_tid == p->p_lwpid);
		p->p_lwpid--;
		p->p_agenttp = NULL;
	}

	/*
	 * Do lgroup bookkeeping to account for thread exiting.
	 */
	kpreempt_disable();
	lgrp_move_thread(t, NULL, 1);
	if (t->t_tid == 1) {
		p->p_t1_lgrpid = LGRP_NONE;
	}
	kpreempt_enable();

	lwp_ctmpl_clear(ttolwp(t));
}

/*
 * Suspend the lwp `t'.  The caller must hold the process's p_lock
 * (asserted below) and the lock is held across the cv_wait_sig() loop.
 *
 * Returns:
 *	0	the target lwp is suspended (or is suspending itself)
 *	EINTR	the wait was interrupted by a signal
 *	ESRCH	the target lwp exited while we waited for it to stop
 */
int
lwp_suspend(kthread_t *t)
{
	int tid;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp().
	 * If an lwp is stopping itself, there is no need to wait.
	 */
top:
	t->t_proc_flag |= TP_HOLDLWP;
	if (t == curthread) {
		t->t_sig_check = 1;
	} else {
		/*
		 * Make sure the lwp stops promptly.
		 */
		thread_lock(t);
		t->t_sig_check = 1;
		/*
		 * XXX Should use virtual stop like /proc does instead of
		 * XXX waking the thread to get it to stop.
		 */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) {
			poke_cpu(t->t_cpu->cpu_id);
		}

		tid = t->t_tid;		/* remember thread ID */
		/*
		 * Wait for lwp to stop
		 */
		while (!SUSPENDED(t)) {
			/*
			 * Drop the thread lock before waiting and reacquire it
			 * afterwards, so the thread can change its t_state
			 * field.
			 */
			thread_unlock(t);

			/*
			 * Check if aborted by exitlwps().
			 */
			if (p->p_flag & SEXITLWPS)
				lwp_exit();

			/*
			 * Cooperate with jobcontrol signals and /proc stopping
			 * by calling cv_wait_sig() to wait for the target
			 * lwp to stop.  Just using cv_wait() can lead to
			 * deadlock because, if some other lwp has stopped
			 * by either of these mechanisms, then p_lwprcnt will
			 * never become zero if we do a cv_wait().
			 */
			if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock))
				return (EINTR);

			/*
			 * Check to see if thread died while we were
			 * waiting for it to suspend.
			 */
			if (idtot(p, tid) == NULL)
				return (ESRCH);

			thread_lock(t);
			/*
			 * If the TP_HOLDLWP flag went away, lwp_continue()
			 * or vfork() must have been called while we were
			 * waiting, so start over again.
			 */
			if ((t->t_proc_flag & TP_HOLDLWP) == 0) {
				thread_unlock(t);
				goto top;
			}
		}
		thread_unlock(t);
	}
	return (0);
}

/*
 * continue a lwp that's been stopped by lwp_suspend().
 */
void
lwp_continue(kthread_t *t)
{
	proc_t *p = ttoproc(t);
	int was_suspended = t->t_proc_flag & TP_HOLDLWP;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t->t_proc_flag &= ~TP_HOLDLWP;
	thread_lock(t);
	/*
	 * Only restart the lwp if it is actually suspended and no
	 * fork/watchpoint hold is in progress on the process.
	 */
	if (SUSPENDED(t) &&
	    !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) {
		p->p_lwprcnt++;
		t->t_schedflag |= TS_CSTART;
		setrun_locked(t);
	}
	thread_unlock(t);
	/*
	 * Wakeup anyone waiting for this thread to be suspended
	 */
	if (was_suspended)
		cv_broadcast(&p->p_holdlwps);
}

/*
 * ********************************
 *  Miscellaneous lwp routines	  *
 * ********************************
 */
/*
 * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK.
 * This will cause the process's lwps to stop at a hold point.  A hold
 * point is where a kernel thread has a flat stack.  This is at the
 * return from a system call and at the return from a user level trap.
 *
 * When a process is undergoing a fork1() or vfork(), its p_flag is set to
 * SHOLDFORK1.  This will cause the process's lwps to stop at a modified
 * hold point.  The lwps in the process are not being cloned, so they
 * are held at the usual hold points and also within issig_forreal().
 * This has the side-effect that their system calls do not return
 * showing EINTR.
 *
 * An lwp can also be held.  This is identified by the TP_HOLDLWP flag on
 * the thread.  The TP_HOLDLWP flag is set in lwp_suspend(), where the active
 * lwp is waiting for the target lwp to be stopped.
 */
void
holdlwp(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;

	mutex_enter(&p->p_lock);
	/*
	 * Don't terminate immediately if the process is dumping core.
	 * Once the process has dumped core, all lwps are terminated.
	 */
	if (!(p->p_flag & SCOREDUMP)) {
		if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP))
			lwp_exit();
	}
	/* Nothing is holding us; return without stopping. */
	if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) {
		mutex_exit(&p->p_lock);
		return;
	}
	/*
	 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps
	 * when p->p_lwprcnt becomes zero.
	 */
	stop(PR_SUSPENDED, SUSPEND_NORMAL);
	if (p->p_flag & SEXITLWPS)
		lwp_exit();
	mutex_exit(&p->p_lock);
}

/*
 * Have all lwps within the process hold at a point where they are
 * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1).
 *
 * Returns 1 when the process has become quiescent (holdflag was set,
 * every other lwp reached its hold point, and holdflag has been cleared
 * again before returning).  Returns 0 if the hold was abandoned because
 * another lwp is doing a forkall() or the process is exiting.
 */
int
holdlwps(int holdflag)
{
	proc_t *p = curproc;

	ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1);
	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
again:
	while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		/*
		 * If another lwp is doing a forkall() or proc_exit(), bail out.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDFORK)) {
			mutex_exit(&p->p_lock);
			return (0);
		}
		/*
		 * Another lwp is doing a fork1() or is undergoing
		 * watchpoint activity.  We hold here for it to complete.
		 */
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
	}
	p->p_flag |= holdflag;
	pokelwps(p);
	--p->p_lwprcnt;
	/*
	 * Wait for the process to become quiescent (p->p_lwprcnt == 0).
	 */
	while (p->p_lwprcnt > 0) {
		/*
		 * Check if aborted by exitlwps().
		 * Also check if SHOLDWATCH is set; it takes precedence.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) {
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			cv_broadcast(&p->p_holdlwps);
			goto again;
		}
		/*
		 * Cooperate with jobcontrol signals and /proc stopping.
		 * If some other lwp has stopped by either of these
		 * mechanisms, then p_lwprcnt will never become zero
		 * and the process will appear deadlocked unless we
		 * stop here in sympathy with the other lwp before
		 * doing the cv_wait() below.
		 *
		 * If the other lwp stops after we do the cv_wait(), it
		 * will wake us up to loop around and do the sympathy stop.
		 *
		 * Since stop() drops p->p_lock, we must start from
		 * the top again on returning from stop().
		 */
		if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) {
			int whystop = p->p_stopsig? PR_JOBCONTROL :
			    PR_REQUESTED;
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			stop(whystop, p->p_stopsig);
			goto again;
		}
		cv_wait(&p->p_holdlwps, &p->p_lock);
	}
	p->p_lwprcnt++;
	p->p_flag &= ~holdflag;
	mutex_exit(&p->p_lock);
	return (1);
}

/*
 * See comments for holdwatch(), below.
 */
static int
holdcheck(int clearflags)
{
	proc_t *p = curproc;

	/*
	 * If we are trying to exit, that takes precedence over anything else.
	 */
	if (p->p_flag & SEXITLWPS) {
		p->p_lwprcnt++;
		p->p_flag &= ~clearflags;
		lwp_exit();
	}

	/*
	 * If another thread is calling fork1(), stop the current thread so the
	 * other can complete.
	 */
	if (p->p_flag & SHOLDFORK1) {
		p->p_lwprcnt++;
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
		/* stop() dropped p->p_lock, so re-check for exit on return */
		if (p->p_flag & SEXITLWPS) {
			p->p_flag &= ~clearflags;
			lwp_exit();
		}
		return (-1);
	}

	/*
	 * If another thread is calling fork(), then indicate we are doing
	 * watchpoint activity.  This will cause holdlwps() above to stop the
	 * forking thread, at which point we can continue with watchpoint
	 * activity.
	 */
	if (p->p_flag & SHOLDFORK) {
		p->p_lwprcnt++;
		while (p->p_flag & SHOLDFORK) {
			p->p_flag |= SHOLDWATCH;
			cv_broadcast(&p->p_holdlwps);
			cv_wait(&p->p_holdlwps, &p->p_lock);
			p->p_flag &= ~SHOLDWATCH;
		}
		return (-1);
	}

	return (0);
}

/*
 * Stop all lwps within the process, holding themselves in the kernel while the
 * active lwp undergoes watchpoint activity.  This is more complicated than
 * expected because stop() relies on calling holdwatch() in order to copyin data
 * from the user's address space.  A double barrier is used to prevent an
 * infinite loop.
 *
 *	o The first thread into holdwatch() is the 'master' thread and does
 *	  the following:
 *
 *		- Sets SHOLDWATCH on the current process
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for all threads to be either stopped or have
 *		  TP_WATCHSTOP set.
 *		- Sets the SWATCHOK flag on the process
 *		- Unsets TP_WATCHSTOP
 *		- Waits for the other threads to completely stop
 *		- Unsets SWATCHOK
 *
 *	o If SHOLDWATCH is already set when we enter this function, then another
 *	  thread is already trying to stop this thread.  This 'slave' thread
 *	  does the following:
 *
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for SWATCHOK flag to be set
 *		- Calls stop()
 *
 *	o If SWATCHOK is set on the process, then this function immediately
 *	  returns, as we must have been called via stop().
 *
 * In addition, there are other flags that take precedence over SHOLDWATCH:
 *
 *	o If SEXITLWPS is set, exit immediately.
 *
 *	o If SHOLDFORK1 is set, wait for fork1() to complete.
 *
 *	o If SHOLDFORK is set, then watchpoint activity takes precedence.  In
 *	  this case, set SHOLDWATCH, signalling the forking thread to stop
 *	  first.
 *
 *	o If the process is being stopped via /proc (TP_PRSTOP is set), then we
 *	  stop the current thread.
 *
 * Returns 0 if all threads have been quiesced.  Returns non-zero if not all
 * threads were stopped, or the list of watched pages has changed.
 */
int
holdwatch(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;
	int ret = 0;

	mutex_enter(&p->p_lock);

	p->p_lwprcnt--;

	/*
	 * Check for bail-out conditions as outlined above.
	 */
	if (holdcheck(0) != 0) {
		mutex_exit(&p->p_lock);
		return (-1);
	}

	if (!(p->p_flag & SHOLDWATCH)) {
		/*
		 * We are the master watchpoint thread.  Set SHOLDWATCH and poke
		 * the other threads.
		 */
		p->p_flag |= SHOLDWATCH;
		pokelwps(p);

		/*
		 * Wait for all threads to be stopped or have TP_WATCHSTOP set.
		 */
		while (pr_allstopped(p, 1) > 0) {
			if (holdcheck(SHOLDWATCH) != 0) {
				p->p_flag &= ~SHOLDWATCH;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now stopped or in the process of stopping.
		 * Set SWATCHOK and let them stop completely.
		 */
		p->p_flag |= SWATCHOK;
		t->t_proc_flag &= ~TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (pr_allstopped(p, 0) > 0) {
			/*
			 * At first glance, it may appear that we don't need a
			 * call to holdcheck() here.  But if the process gets a
			 * SIGKILL signal, one of our stopped threads may have
			 * been awakened and is waiting in exitlwps(), which
			 * takes precedence over watchpoints.
			 */
			if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) {
				p->p_flag &= ~(SHOLDWATCH | SWATCHOK);
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now completely stopped.
		 */
		p->p_flag &= ~SWATCHOK;
		p->p_flag &= ~SHOLDWATCH;
		p->p_lwprcnt++;

	} else if (!(p->p_flag & SWATCHOK)) {

		/*
		 * SHOLDWATCH is set, so another thread is trying to do
		 * watchpoint activity.  Indicate this thread is stopping, and
		 * wait for the OK from the master thread.
		 */
		t->t_proc_flag |= TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (!(p->p_flag & SWATCHOK)) {
			if (holdcheck(0) != 0) {
				t->t_proc_flag &= ~TP_WATCHSTOP;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * Once the master thread has given the OK, this thread can
		 * actually call stop().
		 */
		t->t_proc_flag &= ~TP_WATCHSTOP;
		p->p_lwprcnt++;

		stop(PR_SUSPENDED, SUSPEND_NORMAL);

		/*
		 * It's not OK to do watchpoint activity, notify caller to
		 * retry.
		 */
		ret = -1;

	} else {

		/*
		 * The only way we can hit the case where SHOLDWATCH is set and
		 * SWATCHOK is set is if we are triggering this from within a
		 * stop() call.  Assert that this is the case.
		 */

		ASSERT(t->t_proc_flag & TP_STOPPING);
		p->p_lwprcnt++;
	}

	mutex_exit(&p->p_lock);

	return (ret);
}

/*
 * Force all interruptible lwps to trap into the kernel.
 * The caller must hold p->p_lock (asserted below); each target thread
 * is examined and made runnable under its own thread_lock().
 */
void
pokelwps(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_tlist;
	do {
		if (t == curthread)
			continue;
		thread_lock(t);
		aston(t);	/* make thread trap or do post_syscall */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_STOPPED) {
			/*
			 * Ensure that proc_exit() is not blocked by lwps
			 * that were stopped via jobcontrol or /proc.
			 */
			if (p->p_flag & SEXITLWPS) {
				p->p_stopsig = 0;
				t->t_schedflag |= (TS_XSTART | TS_PSTART);
				setrun_locked(t);
			}
			/*
			 * If we are holding lwps for a forkall(),
			 * force lwps that have been suspended via
			 * lwp_suspend() and are suspended inside
			 * of a system call to proceed to their
			 * holdlwp() points where they are clonable.
			 */
			if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) {
				if ((t->t_schedflag & TS_CSTART) == 0) {
					p->p_lwprcnt++;
					t->t_schedflag |= TS_CSTART;
					setrun_locked(t);
				}
			}
		} else if (t->t_state == TS_ONPROC) {
			/* running on another CPU: cross-call it to trap */
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * undo the effects of holdlwps() or holdwatch().
 */
void
continuelwps(proc_t *p)
{
	kthread_t *t;

	/*
	 * If this flag is set, then the original holdwatch() didn't actually
	 * stop the process.  See comments for holdwatch().
	 */
	if (p->p_flag & SWATCHOK) {
		ASSERT(curthread->t_proc_flag & TP_STOPPING);
		return;
	}

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0);

	t = p->p_tlist;
	do {
		thread_lock(t);		/* SUSPENDED looks at t_schedflag */
		if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) {
			p->p_lwprcnt++;
			t->t_schedflag |= TS_CSTART;
			setrun_locked(t);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * Force all other LWPs in the current process other than the caller to exit,
 * and then cv_wait() on p_holdlwps for them to exit.  The exitlwps() function
 * is typically used in these situations:
 *
 *   (a) prior to an exec() system call
 *   (b) prior to dumping a core file
 *   (c) prior to a uadmin() shutdown
 *
 * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed.
 * Multiple threads in the process can call this function at one time by
 * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used
 * to declare one particular thread the winner who gets to kill the others.
 * If a thread wins the exitlwps() dance, zero is returned; otherwise an
 * appropriate errno value is returned to caller for its system call to return.
 */
int
exitlwps(int coredump)
{
	proc_t *p = curproc;
	int heldcnt;

	/* sever door connections before tearing down the other lwps */
	if (curthread->t_door)
		door_slam();
	if (p->p_door_list)
		door_revoke_all();
	if (curthread->t_schedctl != NULL)
		schedctl_lwp_cleanup(curthread);

	/*
	 * Ensure that before starting to wait for other lwps to exit,
	 * cleanup all upimutexes held by curthread.
	 * Otherwise, some other
	 * lwp could be waiting (uninterruptibly) for a upimutex held by
	 * curthread, and the call to pokelwps() below would deadlock.
	 * Even if a blocked upimutex_lock is made interruptible,
	 * curthread's upimutexes need to be unlocked: do it here.
	 */
	if (curthread->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Grab p_lock in order to check and set SEXITLWPS to declare a winner.
	 * We must also block any further /proc access from this point forward.
	 */
	mutex_enter(&p->p_lock);
	prbarrier(p);

	if (p->p_flag & SEXITLWPS) {
		/* another lwp already won the race; back out with EINTR */
		mutex_exit(&p->p_lock);
		aston(curthread);	/* force a trip through post_syscall */
		return (set_errno(EINTR));
	}

	p->p_flag |= SEXITLWPS;
	if (coredump)		/* tell other lwps to stop, not exit */
		p->p_flag |= SCOREDUMP;

	/*
	 * Give precedence to exitlwps() if a holdlwps() is
	 * in progress.  The lwp doing the holdlwps() operation
	 * is aborted when it is awakened.
	 */
	while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		cv_broadcast(&p->p_holdlwps);
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);	/* cv_wait() dropped p_lock; re-block /proc */
	}
	p->p_flag |= SHOLDFORK;
	pokelwps(p);

	/*
	 * Wait for process to become quiescent.
	 */
	--p->p_lwprcnt;
	while (p->p_lwprcnt > 0) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	p->p_lwprcnt++;
	ASSERT(p->p_lwprcnt == 1);

	/*
	 * The SCOREDUMP flag puts the process into a quiescent
	 * state.  The process's lwps remain attached to this
	 * process until exitlwps() is called again without the
	 * 'coredump' flag set, then the lwps are terminated
	 * and the process can exit.
	 */
	if (coredump) {
		p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS);
		goto out;
	}

	/*
	 * Determine if there are any lwps left dangling in
	 * the stopped state.  This happens when exitlwps()
	 * aborts a holdlwps() operation.
	 */
	p->p_flag &= ~SHOLDFORK;
	if ((heldcnt = p->p_lwpcnt) > 1) {
		kthread_t *t;
		for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) {
			t->t_proc_flag &= ~TP_TWAIT;
			lwp_continue(t);
		}
	}

	/*
	 * Wait for all other lwps to exit.
	 */
	--p->p_lwprcnt;
	while (p->p_lwpcnt > 1) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	++p->p_lwprcnt;
	ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1);

	p->p_flag &= ~SEXITLWPS;
	curthread->t_proc_flag &= ~TP_TWAIT;

out:
	if (!coredump && p->p_zombcnt) {	/* cleanup the zombie lwps */
		lwpdir_t *ldp;
		lwpent_t *lep;
		int i;

		for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) {
			lep = ldp->ld_entry;
			if (lep != NULL && lep->le_thread != curthread) {
				ASSERT(lep->le_thread == NULL);
				p->p_zombcnt--;
				lwp_hash_out(p, lep->le_lwpid);
			}
		}
		ASSERT(p->p_zombcnt == 0);
	}

	/*
	 * If some other LWP in the process wanted us to suspend ourself,
	 * then we will not do it.  The other LWP is now terminated and
	 * no one will ever continue us again if we suspend ourself.
	 */
	curthread->t_proc_flag &= ~TP_HOLDLWP;
	p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * Duplicate the lwp 'lwp' into the child process 'cp', giving the new
 * lwp the id 'lwpid'.  Returns the new (stopped) lwp, or NULL if the
 * underlying lwp_create() fails.
 */
klwp_t *
forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid)
{
	klwp_t *clwp;
	void *tregs, *tfpu;
	kthread_t *t = lwptot(lwp);
	kthread_t *ct;
	proc_t *p = lwptoproc(lwp);
	int cid;
	void *bufp;
	void *brand_data;
	int val;

	ASSERT(p == curproc);
	ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0));

#if defined(__sparc)
	if (t == curthread)
		(void) flush_user_windows_to_stack(NULL);
#endif

	if (t == curthread)
		/* copy args out of registers first */
		(void) save_syscall_args();

	clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt,
	    NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid);
	if (clwp == NULL)
		return (NULL);

	/*
	 * most of the parent's lwp can be copied to its duplicate,
	 * except for the fields that are unique to each lwp, like
	 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap.
	 */
	ct = clwp->lwp_thread;
	tregs = clwp->lwp_regs;
	tfpu = clwp->lwp_fpu;
	brand_data = clwp->lwp_brand;

	/*
	 * Copy parent lwp to child lwp.  Hold child's p_lock to prevent
	 * mstate_aggr_state() from reading stale mstate entries copied
	 * from lwp to clwp.
	 */
	mutex_enter(&cp->p_lock);
	*clwp = *lwp;

	/* clear microstate and resource usage data in new lwp */
	init_mstate(ct, LMS_STOPPED);
	bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru));
	mutex_exit(&cp->p_lock);

	/* fix up child's lwp */

	clwp->lwp_pcb.pcb_flags = 0;
#if defined(__sparc)
	clwp->lwp_pcb.pcb_step = STEP_NONE;
#endif
	clwp->lwp_cursig = 0;
	clwp->lwp_extsig = 0;
	clwp->lwp_curinfo = (struct sigqueue *)0;
	clwp->lwp_thread = ct;
	ct->t_sysnum = t->t_sysnum;
	clwp->lwp_regs = tregs;
	clwp->lwp_fpu = tfpu;
	clwp->lwp_brand = brand_data;
	clwp->lwp_ap = clwp->lwp_arg;
	clwp->lwp_procp = cp;
	bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer));
	clwp->lwp_lastfault = 0;
	clwp->lwp_lastfaddr = 0;

	/* copy parent's struct regs to child. */
	lwp_forkregs(lwp, clwp);

	/*
	 * Fork thread context ops, if any.
	 */
	if (t->t_ctx)
		forkctx(t, ct);

	/* fix door state in the child */
	if (t->t_door)
		door_fork(t, ct);

	/* copy current contract templates, clear latest contracts */
	lwp_ctmpl_copy(clwp, lwp);

	mutex_enter(&cp->p_lock);
	/* lwp_create() set the TP_HOLDLWP flag */
	if (!(t->t_proc_flag & TP_HOLDLWP))
		ct->t_proc_flag &= ~TP_HOLDLWP;
	if (cp->p_flag & SMSACCT)
		ct->t_proc_flag |= TP_MSACCT;
	mutex_exit(&cp->p_lock);

	/* Allow brand to propagate brand-specific state */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_forklwp(lwp, clwp);

retry:
	cid = t->t_cid;

	val = CL_ALLOC(&bufp, cid, KM_SLEEP);
	ASSERT(val == 0);

	mutex_enter(&p->p_lock);
	if (cid != t->t_cid) {
		/*
		 * Someone just changed this thread's scheduling class,
		 * so try pre-allocating the buffer again.  Hopefully we
		 * don't hit this often.
		 */
		mutex_exit(&p->p_lock);
		CL_FREE(cid, bufp);
		goto retry;
	}

	ct->t_unpark = t->t_unpark;
	ct->t_clfuncs = t->t_clfuncs;
	CL_FORK(t, ct, bufp);
	ct->t_cid = t->t_cid;	/* after data allocated so prgetpsinfo works */
	mutex_exit(&p->p_lock);

	return (clwp);
}

/*
 * Add a new lwp entry to the lwp directory and to the lwpid hash table.
 * 'tidhash'/'tidhash_sz' identify the hash table to insert into; when
 * 'do_lock' is non-zero, the target bucket's th_lock is taken around
 * the insertion (callers that already hold it pass 0).
 */
void
lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz,
    int do_lock)
{
	tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	kthread_t *t;

	/*
	 * Allocate a directory element from the free list.
	 * Code elsewhere guarantees a free slot.
	 */
	ldp = p->p_lwpfree;
	p->p_lwpfree = ldp->ld_next;
	ASSERT(ldp->ld_entry == NULL);
	ldp->ld_entry = lep;

	if (do_lock)
		mutex_enter(&thp->th_lock);

	/*
	 * Insert it into the lwpid hash table.
	 */
	ldpp = &thp->th_list;
	ldp->ld_next = *ldpp;
	*ldpp = ldp;

	/*
	 * Set the active thread's directory slot entry.
	 */
	if ((t = lep->le_thread) != NULL) {
		ASSERT(lep->le_lwpid == t->t_tid);
		t->t_dslot = (int)(ldp - p->p_lwpdir);
	}

	if (do_lock)
		mutex_exit(&thp->th_lock);
}

/*
 * Remove an lwp from the lwpid hash table and free its directory entry.
 * This is done when a detached lwp exits in lwp_exit() or
 * when a non-detached lwp is waited for in lwp_wait() or
 * when a zombie lwp is detached in lwp_detach().
 */
void
lwp_hash_out(proc_t *p, id_t lwpid)
{
	tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	lwpent_t *lep;

	mutex_enter(&thp->th_lock);
Faulkner for (ldpp = &thp->th_list; 18987c478bd9Sstevel@tonic-gate (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) { 18997c478bd9Sstevel@tonic-gate lep = ldp->ld_entry; 19007c478bd9Sstevel@tonic-gate if (lep->le_lwpid == lwpid) { 19017c478bd9Sstevel@tonic-gate prlwpfree(p, lep); /* /proc deals with le_trace */ 19027c478bd9Sstevel@tonic-gate *ldpp = ldp->ld_next; 19037c478bd9Sstevel@tonic-gate ldp->ld_entry = NULL; 19047c478bd9Sstevel@tonic-gate ldp->ld_next = p->p_lwpfree; 19057c478bd9Sstevel@tonic-gate p->p_lwpfree = ldp; 19067c478bd9Sstevel@tonic-gate kmem_free(lep, sizeof (*lep)); 19077c478bd9Sstevel@tonic-gate break; 19087c478bd9Sstevel@tonic-gate } 19097c478bd9Sstevel@tonic-gate } 19106eb30ec3SRoger A. Faulkner mutex_exit(&thp->th_lock); 19117c478bd9Sstevel@tonic-gate } 19127c478bd9Sstevel@tonic-gate 19137c478bd9Sstevel@tonic-gate /* 19147c478bd9Sstevel@tonic-gate * Lookup an lwp in the lwpid hash table by lwpid. 19157c478bd9Sstevel@tonic-gate */ 19167c478bd9Sstevel@tonic-gate lwpdir_t * 19177c478bd9Sstevel@tonic-gate lwp_hash_lookup(proc_t *p, id_t lwpid) 19187c478bd9Sstevel@tonic-gate { 19196eb30ec3SRoger A. Faulkner tidhash_t *thp; 19207c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 19217c478bd9Sstevel@tonic-gate 19227c478bd9Sstevel@tonic-gate /* 19237c478bd9Sstevel@tonic-gate * The process may be exiting, after p_tidhash has been set to NULL in 19247c478bd9Sstevel@tonic-gate * proc_exit() but before prfee() has been called. Return failure in 19257c478bd9Sstevel@tonic-gate * this case. 19267c478bd9Sstevel@tonic-gate */ 19277c478bd9Sstevel@tonic-gate if (p->p_tidhash == NULL) 19287c478bd9Sstevel@tonic-gate return (NULL); 19297c478bd9Sstevel@tonic-gate 19306eb30ec3SRoger A. Faulkner thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)]; 19316eb30ec3SRoger A. 
Faulkner for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 19327c478bd9Sstevel@tonic-gate if (ldp->ld_entry->le_lwpid == lwpid) 19337c478bd9Sstevel@tonic-gate return (ldp); 19347c478bd9Sstevel@tonic-gate } 19357c478bd9Sstevel@tonic-gate 19367c478bd9Sstevel@tonic-gate return (NULL); 19377c478bd9Sstevel@tonic-gate } 19387c478bd9Sstevel@tonic-gate 19396eb30ec3SRoger A. Faulkner /* 19406eb30ec3SRoger A. Faulkner * Same as lwp_hash_lookup(), but acquire and return 19416eb30ec3SRoger A. Faulkner * the tid hash table entry lock on success. 19426eb30ec3SRoger A. Faulkner */ 19436eb30ec3SRoger A. Faulkner lwpdir_t * 19446eb30ec3SRoger A. Faulkner lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp) 19456eb30ec3SRoger A. Faulkner { 19466eb30ec3SRoger A. Faulkner tidhash_t *tidhash; 19476eb30ec3SRoger A. Faulkner uint_t tidhash_sz; 19486eb30ec3SRoger A. Faulkner tidhash_t *thp; 19496eb30ec3SRoger A. Faulkner lwpdir_t *ldp; 19506eb30ec3SRoger A. Faulkner 19516eb30ec3SRoger A. Faulkner top: 19526eb30ec3SRoger A. Faulkner tidhash_sz = p->p_tidhash_sz; 19536eb30ec3SRoger A. Faulkner membar_consumer(); 19546eb30ec3SRoger A. Faulkner if ((tidhash = p->p_tidhash) == NULL) 19556eb30ec3SRoger A. Faulkner return (NULL); 19566eb30ec3SRoger A. Faulkner 19576eb30ec3SRoger A. Faulkner thp = &tidhash[TIDHASH(lwpid, tidhash_sz)]; 19586eb30ec3SRoger A. Faulkner mutex_enter(&thp->th_lock); 19596eb30ec3SRoger A. Faulkner 19606eb30ec3SRoger A. Faulkner /* 19616eb30ec3SRoger A. Faulkner * Since we are not holding p->p_lock, the tid hash table 19626eb30ec3SRoger A. Faulkner * may have changed. If so, start over. If not, then 19636eb30ec3SRoger A. Faulkner * it cannot change until after we drop &thp->th_lock; 19646eb30ec3SRoger A. Faulkner */ 19656eb30ec3SRoger A. Faulkner if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) { 19666eb30ec3SRoger A. Faulkner mutex_exit(&thp->th_lock); 19676eb30ec3SRoger A. Faulkner goto top; 19686eb30ec3SRoger A. 
Faulkner } 19696eb30ec3SRoger A. Faulkner 19706eb30ec3SRoger A. Faulkner for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 19716eb30ec3SRoger A. Faulkner if (ldp->ld_entry->le_lwpid == lwpid) { 19726eb30ec3SRoger A. Faulkner *mpp = &thp->th_lock; 19736eb30ec3SRoger A. Faulkner return (ldp); 19746eb30ec3SRoger A. Faulkner } 19756eb30ec3SRoger A. Faulkner } 19766eb30ec3SRoger A. Faulkner 19776eb30ec3SRoger A. Faulkner mutex_exit(&thp->th_lock); 19786eb30ec3SRoger A. Faulkner return (NULL); 19796eb30ec3SRoger A. Faulkner } 19806eb30ec3SRoger A. Faulkner 19817c478bd9Sstevel@tonic-gate /* 19827c478bd9Sstevel@tonic-gate * Update the indicated LWP usage statistic for the current LWP. 19837c478bd9Sstevel@tonic-gate */ 19847c478bd9Sstevel@tonic-gate void 19857c478bd9Sstevel@tonic-gate lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc) 19867c478bd9Sstevel@tonic-gate { 19877c478bd9Sstevel@tonic-gate klwp_t *lwp = ttolwp(curthread); 19887c478bd9Sstevel@tonic-gate 19897c478bd9Sstevel@tonic-gate if (lwp == NULL) 19907c478bd9Sstevel@tonic-gate return; 19917c478bd9Sstevel@tonic-gate 19927c478bd9Sstevel@tonic-gate switch (lwp_stat_id) { 19937c478bd9Sstevel@tonic-gate case LWP_STAT_INBLK: 19947c478bd9Sstevel@tonic-gate lwp->lwp_ru.inblock += inc; 19957c478bd9Sstevel@tonic-gate break; 19967c478bd9Sstevel@tonic-gate case LWP_STAT_OUBLK: 19977c478bd9Sstevel@tonic-gate lwp->lwp_ru.oublock += inc; 19987c478bd9Sstevel@tonic-gate break; 19997c478bd9Sstevel@tonic-gate case LWP_STAT_MSGRCV: 20007c478bd9Sstevel@tonic-gate lwp->lwp_ru.msgrcv += inc; 20017c478bd9Sstevel@tonic-gate break; 20027c478bd9Sstevel@tonic-gate case LWP_STAT_MSGSND: 20037c478bd9Sstevel@tonic-gate lwp->lwp_ru.msgsnd += inc; 20047c478bd9Sstevel@tonic-gate break; 20057c478bd9Sstevel@tonic-gate default: 20067c478bd9Sstevel@tonic-gate panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id); 20077c478bd9Sstevel@tonic-gate } 20087c478bd9Sstevel@tonic-gate } 2009