/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/var.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/tnf.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/bitset.h>
#include <sys/schedctl.h>
#include <sys/atomic.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
#include <sys/archsystm.h>

#include <vm/as.h>
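/*
 * Thread binding types checked by the dispatcher, one bit per kind:
 * a thread may be hard-bound to an individual CPU, confined to a CPU
 * partition (processor set), or be an interrupt thread tied to the
 * CPU it services.
 */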
#define	BOUND_CPU	0x1
#define	BOUND_PARTITION	0x2
#define	BOUND_INTR	0x4

/* Dispatch queue allocation structure and functions */
struct disp_queue_info {
	disp_t	*dp;
	dispq_t	*olddispq;
	dispq_t	*newdispq;
	ulong_t	*olddqactmap;
	ulong_t	*newdqactmap;
	int	oldnglobpris;
};
static void	disp_dq_alloc(struct disp_queue_info *dptr, int numpris,
    disp_t *dp);
static void	disp_dq_assign(struct disp_queue_info *dptr, int numpris);
static void	disp_dq_free(struct disp_queue_info *dptr);

/* platform-specific routine to call when processor is idle */
static void	generic_idle_cpu();
void		(*idle_cpu)() = generic_idle_cpu;

/* routines invoked when a CPU enters/exits the idle loop */
static void	idle_enter();
static void	idle_exit();

/* platform-specific routine to call when thread is enqueued */
static void	generic_enq_thread(cpu_t *, int);
void		(*disp_enq_thread)(cpu_t *, int) = generic_enq_thread;

pri_t	kpreemptpri;		/* priority where kernel preemption applies */
pri_t	upreemptpri = 0;	/* priority where normal preemption applies */
pri_t	intr_pri;		/* interrupt thread priority base level */

#define	KPQPRI	-1		/* pri where cpu affinity is dropped for kpq */
pri_t	kpqpri = KPQPRI;	/* can be set in /etc/system */
disp_t	cpu0_disp;		/* boot CPU's dispatch queue */
disp_lock_t	swapped_lock;	/* lock swapped threads and swap queue */
int	nswapped;		/* total number of swapped threads */
void	disp_swapped_enq(kthread_t *tp);
static void	disp_swapped_setrun(kthread_t *tp);
static void	cpu_resched(cpu_t *cp, pri_t tpri);

/*
 * If this is set, only interrupt threads will cause kernel preemptions.
 * This is done by changing the value of kpreemptpri.  kpreemptpri
 * will either be the max sysclass pri + 1 or the min interrupt pri.
 */
int	only_intr_kpreempt;

extern void set_idle_cpu(int cpun);
extern void unset_idle_cpu(int cpun);
static void setkpdq(kthread_t *tp, int borf);
#define	SETKP_BACK	0
#define	SETKP_FRONT	1
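/*
 * setkpdq() places a thread on its partition's kernel preemption queue,
 * either at the back (SETKP_BACK) or at the front (SETKP_FRONT).
 */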
/*
 * Parameter that determines how recently a thread must have run
 * on the CPU to be considered loosely-bound to that CPU to reduce
 * cold cache effects.  The interval is in clock ticks.
 */
#define	RECHOOSE_INTERVAL	3
int	rechoose_interval = RECHOOSE_INTERVAL;

/*
 * Parameter that determines how long (in nanoseconds) a thread must
 * be sitting on a run queue before it can be stolen by another CPU
 * to reduce migrations.
 *
 * nosteal_nsec should be set by the platform code's
 * cmp_set_nosteal_interval() to an appropriate value.  It is set to
 * NOSTEAL_UNINITIALIZED here to indicate that it is uninitialized.
 * Setting nosteal_nsec to 0 effectively disables the nosteal 'protection'.
 */
#define	NOSTEAL_UNINITIALIZED	(-1)
hrtime_t nosteal_nsec = NOSTEAL_UNINITIALIZED;
extern void cmp_set_nosteal_interval(void);

id_t	defaultcid;	/* system "default" class; see dispadmin(1M) */

disp_lock_t	transition_lock;	/* lock on transitioning threads */
disp_lock_t	stop_lock;		/* lock on stopped threads */

static void	cpu_dispqalloc(int numpris);

/*
 * This gets returned by disp_getwork/disp_getbest if we couldn't steal
 * a thread because it was sitting on its run queue for a very short
 * period of time.
 */
#define	T_DONTSTEAL	(kthread_t *)(-1) /* returned by disp_getwork/getbest */

static kthread_t	*disp_getwork(cpu_t *to);
static kthread_t	*disp_getbest(disp_t *from);
static kthread_t	*disp_ratify(kthread_t *tp, disp_t *kpq);

void	swtch_to(kthread_t *);

/*
 * dispatcher and scheduler initialization
 */

/*
 * disp_setup - Common code to calculate and allocate dispatcher
 *		variables and structures based on the maximum priority.
 */
static void
disp_setup(pri_t maxglobpri, pri_t oldnglobpris)
{
	pri_t	newnglobpris;

	ASSERT(MUTEX_HELD(&cpu_lock));

	newnglobpris = maxglobpri + 1 + LOCK_LEVEL;

	if (newnglobpris > oldnglobpris) {
		/*
		 * Allocate new kp queues for each CPU partition.
		 */
		cpupart_kpqalloc(newnglobpris);

		/*
		 * Allocate new dispatch queues for each CPU.
		 */
		cpu_dispqalloc(newnglobpris);

		/*
		 * compute new interrupt thread base priority
		 */
		intr_pri = maxglobpri;
		if (only_intr_kpreempt) {
			kpreemptpri = intr_pri + 1;
			if (kpqpri == KPQPRI)
				kpqpri = kpreemptpri;
		}
		v.v_nglobpris = newnglobpris;
	}
}
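/*
 * For example, with the RT class loaded the highest class-provided
 * global priority is 159, so disp_setup() sizes everything for
 * 159 + 1 + LOCK_LEVEL (10) = 170 priorities; the top LOCK_LEVEL
 * slots are occupied by interrupt threads.
 */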
/*
 * dispinit - Called to initialize all loaded classes and the
 *	      dispatcher framework.
 */
void
dispinit(void)
{
	id_t	cid;
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	maxglobpri = -1;

	/*
	 * Initialize transition lock, which will always be set.
	 */
	DISP_LOCK_INIT(&transition_lock);
	disp_lock_enter_high(&transition_lock);
	DISP_LOCK_INIT(&stop_lock);

	mutex_enter(&cpu_lock);
	CPU->cpu_disp->disp_maxrunpri = -1;
	CPU->cpu_disp->disp_max_unbound_pri = -1;

	/*
	 * Initialize the default CPU partition.
	 */
	cpupart_initialize_default();
	/*
	 * Call the class specific initialization functions for
	 * all pre-installed schedulers.
	 *
	 * We pass the size of a class specific parameter
	 * buffer to each of the initialization functions
	 * to try to catch problems with backward compatibility
	 * of class modules.
	 *
	 * For example a new class module running on an old system
	 * which didn't provide sufficiently large parameter buffers
	 * would be bad news.  Class initialization modules can check for
	 * this and take action if they detect a problem.
	 */

	for (cid = 0; cid < nclass; cid++) {
		sclass_t *sc;

		sc = &sclass[cid];
		if (SCHED_INSTALLED(sc)) {
			cl_maxglobpri = sc->cl_init(cid, PC_CLPARMSZ,
			    &sc->cl_funcs);
			if (cl_maxglobpri > maxglobpri)
				maxglobpri = cl_maxglobpri;
		}
	}
	kpreemptpri = (pri_t)v.v_maxsyspri + 1;
	if (kpqpri == KPQPRI)
		kpqpri = kpreemptpri;

	ASSERT(maxglobpri >= 0);
	disp_setup(maxglobpri, 0);

	mutex_exit(&cpu_lock);

	/*
	 * Platform specific sticky scheduler setup.
	 */
	if (nosteal_nsec == NOSTEAL_UNINITIALIZED)
		cmp_set_nosteal_interval();

	/*
	 * Get the default class ID; this may be later modified via
	 * dispadmin(1M).  This will load the class (normally TS) and that will
	 * call disp_add(), which is why we had to drop cpu_lock first.
	 */
	if (getcid(defaultclass, &defaultcid) != 0) {
		cmn_err(CE_PANIC, "Couldn't load default scheduling class '%s'",
		    defaultclass);
	}
}
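/*
 * disp_add() below also runs later in the life of the system: loading
 * another scheduling class (e.g. a modload of FX or RT) calls it so the
 * dispatcher can grow its tables for the new class's priorities.
 */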
/*
 * disp_add - Called with class pointer to initialize the dispatcher
 *	      for a newly loaded class.
 */
void
disp_add(sclass_t *clp)
{
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	mutex_enter(&cpu_lock);
	/*
	 * Initialize the scheduler class.
	 */
	maxglobpri = (pri_t)(v.v_nglobpris - LOCK_LEVEL - 1);
	cl_maxglobpri = clp->cl_init(clp - sclass, PC_CLPARMSZ, &clp->cl_funcs);
	if (cl_maxglobpri > maxglobpri)
		maxglobpri = cl_maxglobpri;

	/*
	 * Save old queue information.  Since we're initializing a
	 * new scheduling class which has just been loaded, then
	 * the size of the dispq may have changed.  We need to handle
	 * that here.
	 */
	disp_setup(maxglobpri, v.v_nglobpris);

	mutex_exit(&cpu_lock);
}


/*
 * For each CPU, allocate new dispatch queues
 * with the stated number of priorities.
 */
static void
cpu_dispqalloc(int numpris)
{
	cpu_t	*cpup;
	struct disp_queue_info	*disp_mem;
	int i, num;

	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_mem = kmem_zalloc(NCPU *
	    sizeof (struct disp_queue_info), KM_SLEEP);

	/*
	 * This routine must allocate all of the memory before stopping
	 * the cpus because it must not sleep in kmem_alloc while the
	 * CPUs are stopped.  Locks they hold will not be freed until they
	 * are restarted.
	 */
	i = 0;
	cpup = cpu_list;
	do {
		disp_dq_alloc(&disp_mem[i], numpris, cpup->cpu_disp);
		i++;
		cpup = cpup->cpu_next;
	} while (cpup != cpu_list);
	num = i;

	pause_cpus(NULL);
	for (i = 0; i < num; i++)
		disp_dq_assign(&disp_mem[i], numpris);
	start_cpus();
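	/*
	 * The other CPUs were spinning at high PIL between pause_cpus()
	 * and start_cpus() above, which is why the assignment step in
	 * the middle could not be allowed to block or allocate.
	 */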
	/*
	 * We must free all of the memory after starting the cpus because
	 * we cannot risk sleeping in kmem_free while the cpus are stopped.
	 */
	for (i = 0; i < num; i++)
		disp_dq_free(&disp_mem[i]);

	kmem_free(disp_mem, NCPU * sizeof (struct disp_queue_info));
}

static void
disp_dq_alloc(struct disp_queue_info *dptr, int numpris, disp_t *dp)
{
	dptr->newdispq = kmem_zalloc(numpris * sizeof (dispq_t), KM_SLEEP);
	dptr->newdqactmap = kmem_zalloc(((numpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
	dptr->dp = dp;
}

static void
disp_dq_assign(struct disp_queue_info *dptr, int numpris)
{
	disp_t	*dp;

	dp = dptr->dp;
	dptr->olddispq = dp->disp_q;
	dptr->olddqactmap = dp->disp_qactmap;
	dptr->oldnglobpris = dp->disp_npri;

	ASSERT(dptr->oldnglobpris < numpris);

	if (dptr->olddispq != NULL) {
		/*
		 * Use kcopy because bcopy is platform-specific
		 * and could block while we might have paused the cpus.
		 */
		(void) kcopy(dptr->olddispq, dptr->newdispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
		(void) kcopy(dptr->olddqactmap, dptr->newdqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) *
		    sizeof (long));
	}
	dp->disp_q = dptr->newdispq;
	dp->disp_qactmap = dptr->newdqactmap;
	dp->disp_q_limit = &dptr->newdispq[numpris];
	dp->disp_npri = numpris;
}

static void
disp_dq_free(struct disp_queue_info *dptr)
{
	if (dptr->olddispq != NULL)
		kmem_free(dptr->olddispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
	if (dptr->olddqactmap != NULL)
		kmem_free(dptr->olddqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) * sizeof (long));
}
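/*
 * Together, disp_dq_alloc(), disp_dq_assign() and disp_dq_free()
 * implement a three-phase resize: allocate (may sleep), swap the new
 * queues in while the CPUs are paused (must not sleep), then free the
 * old queues once the CPUs are running again (may sleep).
 */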
/*
 * For a newly created CPU, initialize the dispatch queue.
 * This is called before the CPU is known through cpu[] or on any lists.
 */
void
disp_cpu_init(cpu_t *cp)
{
	disp_t	*dp;
	dispq_t	*newdispq;
	ulong_t	*newdqactmap;

	ASSERT(MUTEX_HELD(&cpu_lock));	/* protect dispatcher queue sizes */

	if (cp == cpu0_disp.disp_cpu)
		dp = &cpu0_disp;
	else
		dp = kmem_alloc(sizeof (disp_t), KM_SLEEP);
	bzero(dp, sizeof (disp_t));
	cp->cpu_disp = dp;
	dp->disp_cpu = cp;
	dp->disp_maxrunpri = -1;
	dp->disp_max_unbound_pri = -1;
	DISP_LOCK_INIT(&cp->cpu_thread_lock);
	/*
	 * Allocate memory for the dispatcher queue headers
	 * and the active queue bitmap.
	 */
	newdispq = kmem_zalloc(v.v_nglobpris * sizeof (dispq_t), KM_SLEEP);
	newdqactmap = kmem_zalloc(((v.v_nglobpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
	dp->disp_q = newdispq;
	dp->disp_qactmap = newdqactmap;
	dp->disp_q_limit = &newdispq[v.v_nglobpris];
	dp->disp_npri = v.v_nglobpris;
}

void
disp_cpu_fini(cpu_t *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_kp_free(cp->cpu_disp);
	if (cp->cpu_disp != &cpu0_disp)
		kmem_free(cp->cpu_disp, sizeof (disp_t));
}

/*
 * Allocate new, larger kpreempt dispatch queue to replace the old one.
 */
void
disp_kp_alloc(disp_t *dq, pri_t npri)
{
	struct disp_queue_info	mem_info;

	if (npri > dq->disp_npri) {
		/*
		 * Allocate memory for the new array.
		 */
		disp_dq_alloc(&mem_info, npri, dq);

		/*
		 * We need to copy the old structures to the new
		 * and free the old.
		 */
		disp_dq_assign(&mem_info, npri);
		disp_dq_free(&mem_info);
	}
}
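/*
 * disp_kp_alloc() serves the partition-wide kernel preemption queues;
 * cpupart_kpqalloc() (called from disp_setup() above) is what applies
 * it to each partition when the number of global priorities grows.
 */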
/*
 * Free dispatch queue.
 * Used for the kpreempt queues for a removed CPU partition and
 * for the per-CPU queues of deleted CPUs.
 */
void
disp_kp_free(disp_t *dq)
{
	struct disp_queue_info	mem_info;

	mem_info.olddispq = dq->disp_q;
	mem_info.olddqactmap = dq->disp_qactmap;
	mem_info.oldnglobpris = dq->disp_npri;
	disp_dq_free(&mem_info);
}

/*
 * End dispatcher and scheduler initialization.
 */

/*
 * See if there's anything to do other than remain idle.
 * Return non-zero if there is.
 *
 * This function must be called with high spl, or with
 * kernel preemption disabled to prevent the partition's
 * active cpu list from changing while being traversed.
 *
 * This is essentially a simpler version of disp_getwork()
 * to be called by CPUs preparing to "halt".
 */
int
disp_anywork(void)
{
	cpu_t		*cp = CPU;
	cpu_t		*ocp;
	volatile int	*local_nrunnable = &cp->cpu_disp->disp_nrunnable;

	if (!(cp->cpu_flags & CPU_OFFLINE)) {
		if (CP_MAXRUNPRI(cp->cpu_part) >= 0)
			return (1);

		for (ocp = cp->cpu_next_part; ocp != cp;
		    ocp = ocp->cpu_next_part) {
			ASSERT(CPU_ACTIVE(ocp));

			/*
			 * Something has appeared on the local run queue.
			 */
			if (*local_nrunnable > 0)
				return (1);
			/*
			 * If we encounter another idle CPU that will
			 * soon be trolling around through disp_anywork(),
			 * terminate our walk here and let this other CPU
			 * patrol the next part of the list.
			 */
			if (ocp->cpu_dispatch_pri == -1 &&
			    (ocp->cpu_disp_flags & CPU_DISP_HALTED) == 0)
				return (0);
			/*
			 * Work can be taken from another CPU if:
			 *	- There is unbound work on the run queue
			 *	- That work isn't a thread undergoing a
			 *	  context switch on an otherwise empty queue.
			 *	- The CPU isn't running the idle loop.
			 */
			if (ocp->cpu_disp->disp_max_unbound_pri != -1 &&
			    !((ocp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
			    ocp->cpu_disp->disp_nrunnable == 1) &&
			    ocp->cpu_dispatch_pri != -1)
				return (1);
		}
	}
	return (0);
}

/*
 * Called when CPU enters the idle loop
 */
static void
idle_enter()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_IDLE, gethrtime_unscaled());
	CPU_STATS_ADDQ(cp, sys, idlethread, 1);
	set_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Called when CPU exits the idle loop
 */
static void
idle_exit()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_SYSTEM, gethrtime_unscaled());
	unset_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Idle loop.
 */
void
idle()
{
	struct cpu	*cp = CPU;	/* pointer to this CPU */
	kthread_t	*t;		/* taken thread */

	idle_enter();

	/*
	 * Uniprocessor version of idle loop.
	 * Do this until notified that we're on an actual multiprocessor.
	 */
	while (ncpus == 1) {
		if (cp->cpu_disp->disp_nrunnable == 0) {
			(*idle_cpu)();
			continue;
		}
		idle_exit();
		swtch();

		idle_enter();		/* returned from swtch */
	}
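	/*
	 * With only one CPU there is nothing to steal, so the loop above
	 * simply polls the local run queue between calls into the
	 * platform idle hook.
	 */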
	/*
	 * Multiprocessor idle loop.
	 */
	for (;;) {
		/*
		 * If CPU is completely quiesced by p_online(2), just wait
		 * here with minimal bus traffic until put online.
		 */
		while (cp->cpu_flags & CPU_QUIESCED)
			(*idle_cpu)();

		if (cp->cpu_disp->disp_nrunnable != 0) {
			idle_exit();
			swtch();
		} else {
			if (cp->cpu_flags & CPU_OFFLINE)
				continue;
			if ((t = disp_getwork(cp)) == NULL) {
				if (cp->cpu_chosen_level != -1) {
					disp_t *dp = cp->cpu_disp;
					disp_t *kpq;

					disp_lock_enter(&dp->disp_lock);
					/*
					 * Set kpq under lock to prevent
					 * migration between partitions.
					 */
					kpq = &cp->cpu_part->cp_kp_queue;
					if (kpq->disp_maxrunpri == -1)
						cp->cpu_chosen_level = -1;
					disp_lock_exit(&dp->disp_lock);
				}
				(*idle_cpu)();
				continue;
			}
			/*
			 * If there was a thread but we couldn't steal
			 * it, then keep trying.
			 */
			if (t == T_DONTSTEAL)
				continue;
			idle_exit();
			swtch_to(t);
		}
		idle_enter();		/* returned from swtch/swtch_to */
	}
}
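/*
 * preempt() below is typically reached from the trap and return-to-user
 * paths once cpu_resched() has set cpu_runrun or cpu_kprunrun on this
 * CPU to flag that a higher priority thread is waiting.
 */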
/*
 * Preempt the currently running thread in favor of the highest
 * priority thread.  The class of the current thread controls
 * where it goes on the dispatcher queues.  If panicking, turn
 * preemption off.
 */
void
preempt()
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(curthread);

	if (panicstr)
		return;

	TRACE_0(TR_FAC_DISP, TR_PREEMPT_START, "preempt_start");

	thread_lock(t);

	if (t->t_state != TS_ONPROC || t->t_disp_queue != CPU->cpu_disp) {
		/*
		 * this thread has already been chosen to be run on
		 * another CPU.  Clear kprunrun on this CPU since we're
		 * already headed for swtch().
		 */
		CPU->cpu_kprunrun = 0;
		thread_unlock_nopreempt(t);
		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");
	} else {
		if (lwp != NULL)
			lwp->lwp_ru.nivcsw++;
		CPU_STATS_ADDQ(CPU, sys, inv_swtch, 1);
		THREAD_TRANSITION(t);
		CL_PREEMPT(t);
		DTRACE_SCHED(preempt);
		thread_unlock_nopreempt(t);

		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");

		swtch();		/* clears CPU->cpu_runrun via disp() */
	}
}

extern kthread_t *thread_unpin();

/*
 * disp() - find the highest priority thread for this processor to run, and
 * set it in TS_ONPROC state so that resume() can be called to run it.
 */
static kthread_t *
disp()
{
	cpu_t	*cpup;
	disp_t	*dp;
	kthread_t *tp;
	dispq_t	*dq;
	int	maxrunword;
	pri_t	pri;
	disp_t	*kpq;

	TRACE_0(TR_FAC_DISP, TR_DISP_START, "disp_start");

	cpup = CPU;
	/*
	 * Find the highest priority loaded, runnable thread.
	 */
	dp = cpup->cpu_disp;

reschedule:
	/*
	 * If there is more important work on the global queue with a better
	 * priority than the maximum on this CPU, take it now.
	 */
	kpq = &cpup->cpu_part->cp_kp_queue;
	while ((pri = kpq->disp_maxrunpri) >= 0 &&
	    pri >= dp->disp_maxrunpri &&
	    (cpup->cpu_flags & CPU_OFFLINE) == 0 &&
	    (tp = disp_getbest(kpq)) != NULL) {
		if (disp_ratify(tp, kpq) != NULL) {
			TRACE_1(TR_FAC_DISP, TR_DISP_END,
			    "disp_end:tid %p", tp);
			return (tp);
		}
	}

	disp_lock_enter(&dp->disp_lock);
	pri = dp->disp_maxrunpri;

	/*
	 * If there is nothing to run, look at what's runnable on other queues.
	 * Choose the idle thread if the CPU is quiesced.
	 * Note that CPUs that have the CPU_OFFLINE flag set can still run
	 * interrupt threads, which will be the only threads on the CPU's own
	 * queue, but cannot run threads from other queues.
	 */
	if (pri == -1) {
		if (!(cpup->cpu_flags & CPU_OFFLINE)) {
			disp_lock_exit(&dp->disp_lock);
			if ((tp = disp_getwork(cpup)) == NULL ||
			    tp == T_DONTSTEAL) {
				tp = cpup->cpu_idle_thread;
				(void) splhigh();
				THREAD_ONPROC(tp, cpup);
				cpup->cpu_dispthread = tp;
				cpup->cpu_dispatch_pri = -1;
				cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
				cpup->cpu_chosen_level = -1;
			}
		} else {
			disp_lock_exit_high(&dp->disp_lock);
			tp = cpup->cpu_idle_thread;
			THREAD_ONPROC(tp, cpup);
			cpup->cpu_dispthread = tp;
			cpup->cpu_dispatch_pri = -1;
			cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
			cpup->cpu_chosen_level = -1;
		}
		TRACE_1(TR_FAC_DISP, TR_DISP_END,
		    "disp_end:tid %p", tp);
		return (tp);
	}

	dq = &dp->disp_q[pri];
	tp = dq->dq_first;

	ASSERT(tp != NULL);
	ASSERT(tp->t_schedflag & TS_LOAD);	/* thread must be swapped in */

	DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);

	/*
	 * Found it so remove it from queue.
	 */
	dp->disp_nrunnable--;
	dq->dq_sruncnt--;
	if ((dq->dq_first = tp->t_link) == NULL) {
		ulong_t	*dqactmap = dp->disp_qactmap;

		ASSERT(dq->dq_sruncnt == 0);
		dq->dq_last = NULL;

		/*
		 * The queue is empty, so the corresponding bit needs to be
		 * turned off in dqactmap.  If nrunnable != 0, we just took
		 * the last runnable thread off the highest queue, so
		 * recompute disp_maxrunpri.
		 */
		maxrunword = pri >> BT_ULSHIFT;
		dqactmap[maxrunword] &= ~BT_BIW(pri);

		if (dp->disp_nrunnable == 0) {
			dp->disp_max_unbound_pri = -1;
			dp->disp_maxrunpri = -1;
		} else {
			int ipri;

			ipri = bt_gethighbit(dqactmap, maxrunword);
			dp->disp_maxrunpri = ipri;
			if (ipri < dp->disp_max_unbound_pri)
				dp->disp_max_unbound_pri = ipri;
		}
	} else {
		tp->t_link = NULL;
	}

	/*
	 * Set TS_DONT_SWAP flag to prevent another processor from swapping
	 * out this thread before we have a chance to run it.
	 * While running, it is protected against swapping by t_lock.
	 */
	tp->t_schedflag |= TS_DONT_SWAP;
	cpup->cpu_dispthread = tp;		/* protected by spl only */
	cpup->cpu_dispatch_pri = pri;
	ASSERT(pri == DISP_PRIO(tp));
	thread_onproc(tp, cpup);		/* set t_state to TS_ONPROC */
	disp_lock_exit_high(&dp->disp_lock);	/* drop run queue lock */

	ASSERT(tp != NULL);
	TRACE_1(TR_FAC_DISP, TR_DISP_END,
	    "disp_end:tid %p", tp);

	if (disp_ratify(tp, kpq) == NULL)
		goto reschedule;

	return (tp);
}
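/*
 * disp() is never called directly by outside code; swtch(), swtch_to()
 * and swtch_from_zombie() wrap it and hand whatever thread it chooses
 * to resume().
 */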
/*
 * swtch()
 *	Find best runnable thread and run it.
 *	Called with the current thread already switched to a new state,
 *	on a sleep queue, run queue, stopped, and not zombied.
 *	May be called at any spl level less than or equal to LOCK_LEVEL.
 *	Always drops spl to the base level (spl0()).
 */
void
swtch()
{
	kthread_t	*t = curthread;
	kthread_t	*next;
	cpu_t		*cp;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	if (t->t_flag & T_INTR_THREAD)
		cpu_intr_swtch_enter(t);

	if (t->t_intr != NULL) {
		/*
		 * We are an interrupt thread.  Setup and return
		 * the interrupted thread to be resumed.
		 */
		(void) splhigh();	/* block other scheduler action */
		cp = CPU;		/* now protected against migration */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */
		CPU_STATS_ADDQ(cp, sys, pswitch, 1);
		CPU_STATS_ADDQ(cp, sys, intrblk, 1);
		next = thread_unpin();
		TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");
		resume_from_intr(next);
	} else {
#ifdef	DEBUG
		if (t->t_state == TS_ONPROC &&
		    t->t_disp_queue->disp_cpu == CPU &&
		    t->t_preempt == 0) {
			thread_lock(t);
			ASSERT(t->t_state != TS_ONPROC ||
			    t->t_disp_queue->disp_cpu != CPU ||
			    t->t_preempt != 0);	/* cannot migrate */
			thread_unlock_nopreempt(t);
		}
#endif	/* DEBUG */
		cp = CPU;
		next = disp();		/* returns with spl high */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */

		/* OK to steal anything left on run queue */
		cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;

		if (next != t) {
			hrtime_t now;

			now = gethrtime_unscaled();
			pg_ev_thread_swtch(cp, now, t, next);

			/*
			 * If t was previously in the TS_ONPROC state,
			 * setfrontdq and setbackdq won't have set its t_waitrq.
			 * Since we now finally know that we're switching away
			 * from this thread, set its t_waitrq if it is on a run
			 * queue.
			 */
			if ((t->t_state == TS_RUN) && (t->t_waitrq == 0)) {
				t->t_waitrq = now;
			}

			/*
			 * restore mstate of thread that we are switching to
			 */
			restore_mstate(next);

			CPU_STATS_ADDQ(cp, sys, pswitch, 1);
			cp->cpu_last_swtch = t->t_disp_time = ddi_get_lbolt();
			TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

			if (dtrace_vtime_active)
				dtrace_vtime_switch(next);

			resume(next);
			/*
			 * The TR_RESUME_END and TR_SWTCH_END trace points
			 * appear at the end of resume(), because we may not
			 * return here
			 */
		} else {
			if (t->t_flag & T_INTR_THREAD)
				cpu_intr_swtch_exit(t);
			/*
			 * Threads that enqueue themselves on a run queue defer
			 * setting t_waitrq. It is then either set in swtch()
			 * when the CPU is actually yielded, or not at all if it
			 * is remaining on the CPU.
			 * There is however a window between where the thread
			 * placed itself on a run queue, and where it selects
			 * itself in disp(), where a third party (eg. clock()
			 * doing tick processing) may have re-enqueued this
			 * thread, setting t_waitrq in the process. We detect
			 * this race by noticing that despite switching to
			 * ourself, our t_waitrq has been set, and should be
			 * cleared.
			 */
			if (t->t_waitrq != 0)
				t->t_waitrq = 0;

			pg_ev_thread_remain(cp, t);

			DTRACE_SCHED(remain__cpu);
			TRACE_0(TR_FAC_DISP, TR_SWTCH_END, "swtch_end");
			(void) spl0();
		}
	}
}
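/*
 * A typical swtch() caller (sketch): a blocking primitive such as
 * cv_wait() puts curthread on a sleep queue under its thread lock,
 * marks it TS_SLEEP, and then calls swtch(), which picks the next
 * thread via disp() and resume()s it.
 */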
/*
 * swtch_from_zombie()
 *	Special case of swtch(), which allows checks for TS_ZOMB to be
 *	eliminated from normal resume.
 *	Find best runnable thread and run it.
 *	Called with the current thread zombied.
 *	Zombies cannot migrate, so CPU references are safe.
 */
void
swtch_from_zombie()
{
	kthread_t	*next;
	cpu_t		*cpu = CPU;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	ASSERT(curthread->t_state == TS_ZOMB);

	next = disp();			/* returns with spl high */
	ASSERT(CPU_ON_INTR(CPU) == 0);	/* not called with PIL > 10 */
	CPU_STATS_ADDQ(CPU, sys, pswitch, 1);
	ASSERT(next != curthread);
	TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

	pg_ev_thread_swtch(cpu, gethrtime_unscaled(), curthread, next);

	restore_mstate(next);

	if (dtrace_vtime_active)
		dtrace_vtime_switch(next);

	resume_from_zombie(next);
	/*
	 * The TR_RESUME_END and TR_SWTCH_END trace points
	 * appear at the end of resume(), because we certainly will not
	 * return here
	 */
}

#if defined(DEBUG) && (defined(DISP_DEBUG) || defined(lint))

/*
 * search_disp_queues()
 *	Search the given dispatch queues for thread tp.
 *	Return 1 if tp is found, otherwise return 0.
 */
static int
search_disp_queues(disp_t *dp, kthread_t *tp)
{
	dispq_t	*dq;
	dispq_t	*eq;

	disp_lock_enter_high(&dp->disp_lock);

	for (dq = dp->disp_q, eq = dp->disp_q_limit; dq < eq; ++dq) {
		kthread_t	*rp;

		ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL);

		for (rp = dq->dq_first; rp; rp = rp->t_link)
			if (tp == rp) {
				disp_lock_exit_high(&dp->disp_lock);
				return (1);
			}
	}
	disp_lock_exit_high(&dp->disp_lock);

	return (0);
}

/*
 * thread_on_queue()
 *	Search all per-CPU dispatch queues and all partition-wide kpreempt
 *	queues for thread tp. Return 1 if tp is found, otherwise return 0.
 */
static int
thread_on_queue(kthread_t *tp)
{
	cpu_t	*cp;
	struct cpupart	*part;

	ASSERT(getpil() >= DISP_LEVEL);

	/*
	 * Search the per-CPU dispatch queues for tp.
	 */
	cp = CPU;
	do {
		if (search_disp_queues(cp->cpu_disp, tp))
			return (1);
	} while ((cp = cp->cpu_next_onln) != CPU);

	/*
	 * Search the partition-wide kpreempt queues for tp.
	 */
	part = CPU->cpu_part;
	do {
		if (search_disp_queues(&part->cp_kp_queue, tp))
			return (1);
	} while ((part = part->cp_next) != CPU->cpu_part);

	return (0);
}

#else

#define	thread_on_queue(tp)	0	/* ASSERT must be !thread_on_queue */

#endif	/* DEBUG */

/*
 * like swtch(), but switch to a specified thread taken from another CPU.
 *	called with spl high..
 */
void
swtch_to(kthread_t *next)
{
	cpu_t		*cp = CPU;
	hrtime_t	now;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	/*
	 * Update context switch statistics.
	 */
	CPU_STATS_ADDQ(cp, sys, pswitch, 1);

	TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

	now = gethrtime_unscaled();
	pg_ev_thread_swtch(cp, now, curthread, next);

	/* OK to steal anything left on run queue */
	cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;

	/* record last execution time */
	cp->cpu_last_swtch = curthread->t_disp_time = ddi_get_lbolt();

	/*
	 * If t was previously in the TS_ONPROC state, setfrontdq and setbackdq
	 * won't have set its t_waitrq.  Since we now finally know that we're
	 * switching away from this thread, set its t_waitrq if it is on a run
	 * queue.
1099f2bd4627Sjohansen */ 1100f2bd4627Sjohansen if ((curthread->t_state == TS_RUN) && (curthread->t_waitrq == 0)) { 11010e751525SEric Saxe curthread->t_waitrq = now; 1102f2bd4627Sjohansen } 1103f2bd4627Sjohansen 1104f2bd4627Sjohansen /* restore next thread to previously running microstate */ 1105f2bd4627Sjohansen restore_mstate(next); 1106f2bd4627Sjohansen 11077c478bd9Sstevel@tonic-gate if (dtrace_vtime_active) 11087c478bd9Sstevel@tonic-gate dtrace_vtime_switch(next); 11097c478bd9Sstevel@tonic-gate 11107c478bd9Sstevel@tonic-gate resume(next); 11117c478bd9Sstevel@tonic-gate /* 11127c478bd9Sstevel@tonic-gate * The TR_RESUME_END and TR_SWTCH_END trace points 11137c478bd9Sstevel@tonic-gate * appear at the end of resume(), because we may not 11147c478bd9Sstevel@tonic-gate * return here 11157c478bd9Sstevel@tonic-gate */ 11167c478bd9Sstevel@tonic-gate } 11177c478bd9Sstevel@tonic-gate 11187c478bd9Sstevel@tonic-gate #define CPU_IDLING(pri) ((pri) == -1) 11197c478bd9Sstevel@tonic-gate 11207c478bd9Sstevel@tonic-gate static void 11217c478bd9Sstevel@tonic-gate cpu_resched(cpu_t *cp, pri_t tpri) 11227c478bd9Sstevel@tonic-gate { 11237c478bd9Sstevel@tonic-gate int call_poke_cpu = 0; 11247c478bd9Sstevel@tonic-gate pri_t cpupri = cp->cpu_dispatch_pri; 11257c478bd9Sstevel@tonic-gate 11267c478bd9Sstevel@tonic-gate if (!CPU_IDLING(cpupri) && (cpupri < tpri)) { 11277c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_CPU_RESCHED, 11287c478bd9Sstevel@tonic-gate "CPU_RESCHED:Tpri %d Cpupri %d", tpri, cpupri); 11297c478bd9Sstevel@tonic-gate if (tpri >= upreemptpri && cp->cpu_runrun == 0) { 11307c478bd9Sstevel@tonic-gate cp->cpu_runrun = 1; 11317c478bd9Sstevel@tonic-gate aston(cp->cpu_dispthread); 11327c478bd9Sstevel@tonic-gate if (tpri < kpreemptpri && cp != CPU) 11337c478bd9Sstevel@tonic-gate call_poke_cpu = 1; 11347c478bd9Sstevel@tonic-gate } 11357c478bd9Sstevel@tonic-gate if (tpri >= kpreemptpri && cp->cpu_kprunrun == 0) { 11367c478bd9Sstevel@tonic-gate cp->cpu_kprunrun = 1; 11377c478bd9Sstevel@tonic-gate if (cp != CPU) 11387c478bd9Sstevel@tonic-gate call_poke_cpu = 1; 11397c478bd9Sstevel@tonic-gate } 11407c478bd9Sstevel@tonic-gate } 11417c478bd9Sstevel@tonic-gate 11427c478bd9Sstevel@tonic-gate /* 11437c478bd9Sstevel@tonic-gate * Propagate cpu_runrun and cpu_kprunrun to global visibility. 11447c478bd9Sstevel@tonic-gate */ 11457c478bd9Sstevel@tonic-gate membar_enter(); 11467c478bd9Sstevel@tonic-gate 11477c478bd9Sstevel@tonic-gate if (call_poke_cpu) 11487c478bd9Sstevel@tonic-gate poke_cpu(cp->cpu_id); 11497c478bd9Sstevel@tonic-gate } 11507c478bd9Sstevel@tonic-gate 11517c478bd9Sstevel@tonic-gate /* 11527c478bd9Sstevel@tonic-gate * setbackdq() keeps runqs balanced such that the difference in length 11537c478bd9Sstevel@tonic-gate * between the chosen runq and the next one is no more than RUNQ_MAX_DIFF. 11547c478bd9Sstevel@tonic-gate * For threads with priorities below RUNQ_MATCH_PRI levels, the runq lengths 11557c478bd9Sstevel@tonic-gate * must match. When the per-thread TS_RUNQMATCH flag is set, setbackdq() will 11567c478bd9Sstevel@tonic-gate * try to keep runqs perfectly balanced regardless of the thread priority.
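 *
 * An informal sketch of the rule (queue lengths hypothetical): with
 * RUNQ_MATCH_PRI == 16 and RUNQ_MAX_DIFF == 2, a priority-10 thread
 * migrates whenever the neighboring queue is shorter at its priority,
 * while a priority-40 thread migrates only when the neighbor is
 * shorter by more than two entries, per the adjustment in setbackdq():
 *
 *	qlen = RUNQ_LEN(cp, tpri);
 *	if (tpri >= RUNQ_MATCH_PRI && !(tp->t_schedflag & TS_RUNQMATCH))
 *		qlen -= RUNQ_MAX_DIFF;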
11577c478bd9Sstevel@tonic-gate */ 11587c478bd9Sstevel@tonic-gate #define RUNQ_MATCH_PRI 16 /* pri below which queue lengths must match */ 11597c478bd9Sstevel@tonic-gate #define RUNQ_MAX_DIFF 2 /* maximum runq length difference */ 11607c478bd9Sstevel@tonic-gate #define RUNQ_LEN(cp, pri) ((cp)->cpu_disp->disp_q[pri].dq_sruncnt) 11617c478bd9Sstevel@tonic-gate 11626890d023SEric Saxe /* 11636890d023SEric Saxe * Macro that evaluates to true if it is likely that the thread has cache 11646890d023SEric Saxe * warmth. This is based on the amount of time that has elapsed since the 11656890d023SEric Saxe * thread last ran. If that amount of time is less than "rechoose_interval" 11666890d023SEric Saxe * ticks, then we decide that the thread has enough cache warmth to warrant 11676890d023SEric Saxe * some affinity for t->t_cpu. 11686890d023SEric Saxe */ 11696890d023SEric Saxe #define THREAD_HAS_CACHE_WARMTH(thread) \ 11706890d023SEric Saxe ((thread == curthread) || \ 1171d3d50737SRafael Vanoni ((ddi_get_lbolt() - thread->t_disp_time) <= rechoose_interval)) 11727c478bd9Sstevel@tonic-gate /* 11737c478bd9Sstevel@tonic-gate * Put the specified thread on the back of the dispatcher 11747c478bd9Sstevel@tonic-gate * queue corresponding to its current priority. 11757c478bd9Sstevel@tonic-gate * 11767c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state 11777c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl. 11787c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked. 11797c478bd9Sstevel@tonic-gate */ 11807c478bd9Sstevel@tonic-gate void 11817c478bd9Sstevel@tonic-gate setbackdq(kthread_t *tp) 11827c478bd9Sstevel@tonic-gate { 11837c478bd9Sstevel@tonic-gate dispq_t *dq; 11847c478bd9Sstevel@tonic-gate disp_t *dp; 11857c478bd9Sstevel@tonic-gate cpu_t *cp; 11867c478bd9Sstevel@tonic-gate pri_t tpri; 11877c478bd9Sstevel@tonic-gate int bound; 11886890d023SEric Saxe boolean_t self; 11897c478bd9Sstevel@tonic-gate 11907c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 11917c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 11927c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 11937c478bd9Sstevel@tonic-gate 11947c478bd9Sstevel@tonic-gate /* 11957c478bd9Sstevel@tonic-gate * If the thread is "swapped" or on the swap queue, don't 11967c478bd9Sstevel@tonic-gate * queue it, but wake sched.
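 * (The single test below covers both cases at once: the masked value
 * differs from TS_LOAD either when TS_LOAD is clear or when
 * TS_ON_SWAPQ is also set.)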
11977c478bd9Sstevel@tonic-gate */ 11987c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 11997c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp); 12007c478bd9Sstevel@tonic-gate return; 12017c478bd9Sstevel@tonic-gate } 12027c478bd9Sstevel@tonic-gate 12036890d023SEric Saxe self = (tp == curthread); 12046890d023SEric Saxe 1205abd41583Sgd if (tp->t_bound_cpu || tp->t_weakbound_cpu) 1206abd41583Sgd bound = 1; 1207abd41583Sgd else 1208abd41583Sgd bound = 0; 1209abd41583Sgd 12107c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 12117c478bd9Sstevel@tonic-gate if (ncpus == 1) 12127c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 1213abd41583Sgd else if (!bound) { 12147c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) { 12157c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_BACK); 12167c478bd9Sstevel@tonic-gate return; 12177c478bd9Sstevel@tonic-gate } 12186890d023SEric Saxe 12197c478bd9Sstevel@tonic-gate /* 12206890d023SEric Saxe * We'll generally let this thread continue to run where 12216890d023SEric Saxe * it last ran...but will consider migration if: 12226890d023SEric Saxe * - The thread probably doesn't have much cache warmth. 12236890d023SEric Saxe * - The CPU where it last ran is the target of an offline 12246890d023SEric Saxe * request. 12256890d023SEric Saxe * - The thread last ran outside its home lgroup. 12267c478bd9Sstevel@tonic-gate */ 12276890d023SEric Saxe if ((!THREAD_HAS_CACHE_WARMTH(tp)) || 12286890d023SEric Saxe (tp->t_cpu == cpu_inmotion)) { 12296890d023SEric Saxe cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, NULL); 12306890d023SEric Saxe } else if (!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, tp->t_cpu)) { 12316890d023SEric Saxe cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, 12326890d023SEric Saxe self ? tp->t_cpu : NULL); 12336890d023SEric Saxe } else { 12346890d023SEric Saxe cp = tp->t_cpu; 12356890d023SEric Saxe } 12367c478bd9Sstevel@tonic-gate 12377c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) { 12387c478bd9Sstevel@tonic-gate int qlen; 12397c478bd9Sstevel@tonic-gate 12407c478bd9Sstevel@tonic-gate /* 1241fb2f18f8Sesaxe * Perform any CMT load balancing 12427c478bd9Sstevel@tonic-gate */ 1243fb2f18f8Sesaxe cp = cmt_balance(tp, cp); 12447c478bd9Sstevel@tonic-gate 12457c478bd9Sstevel@tonic-gate /* 12467c478bd9Sstevel@tonic-gate * Balance across the run queues 12477c478bd9Sstevel@tonic-gate */ 12487c478bd9Sstevel@tonic-gate qlen = RUNQ_LEN(cp, tpri); 12497c478bd9Sstevel@tonic-gate if (tpri >= RUNQ_MATCH_PRI && 12507c478bd9Sstevel@tonic-gate !(tp->t_schedflag & TS_RUNQMATCH)) 12517c478bd9Sstevel@tonic-gate qlen -= RUNQ_MAX_DIFF; 12527c478bd9Sstevel@tonic-gate if (qlen > 0) { 1253685679f7Sakolb cpu_t *newcp; 12547c478bd9Sstevel@tonic-gate 1255685679f7Sakolb if (tp->t_lpl->lpl_lgrpid == LGRP_ROOTID) { 1256685679f7Sakolb newcp = cp->cpu_next_part; 1257685679f7Sakolb } else if ((newcp = cp->cpu_next_lpl) == cp) { 1258685679f7Sakolb newcp = cp->cpu_next_part; 1259685679f7Sakolb } 1260685679f7Sakolb 1261685679f7Sakolb if (RUNQ_LEN(newcp, tpri) < qlen) { 1262685679f7Sakolb DTRACE_PROBE3(runq__balance, 1263685679f7Sakolb kthread_t *, tp, 1264685679f7Sakolb cpu_t *, cp, cpu_t *, newcp); 1265685679f7Sakolb cp = newcp; 12667c478bd9Sstevel@tonic-gate } 12677c478bd9Sstevel@tonic-gate } 12687c478bd9Sstevel@tonic-gate } else { 12697c478bd9Sstevel@tonic-gate /* 12707c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition.
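 * (Note that the search starts from the new partition's cp_cpulist
 * rather than from tp->t_cpu, since the thread's last CPU need not
 * belong to the new partition.)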
12717c478bd9Sstevel@tonic-gate */ 12727c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 12737c478bd9Sstevel@tonic-gate tp->t_lpl, tp->t_pri, NULL); 12747c478bd9Sstevel@tonic-gate } 12757c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 12767c478bd9Sstevel@tonic-gate } else { 12777c478bd9Sstevel@tonic-gate /* 12787c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for 12797c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the 12807c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must 12817c478bd9Sstevel@tonic-gate * favour weak binding over strong. 12827c478bd9Sstevel@tonic-gate */ 12837c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ? 12847c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu; 12857c478bd9Sstevel@tonic-gate } 1286f2bd4627Sjohansen /* 1287f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue 1288f2bd4627Sjohansen * but then chosen to run again by disp. If the thread we're placing on 1289f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1290f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this 1291f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC 1292f2bd4627Sjohansen * state. 1293f2bd4627Sjohansen */ 12946890d023SEric Saxe if ((!self) && (tp->t_waitrq == 0)) { 1295f2bd4627Sjohansen hrtime_t curtime; 1296f2bd4627Sjohansen 1297f2bd4627Sjohansen curtime = gethrtime_unscaled(); 1298f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime); 1299f2bd4627Sjohansen tp->t_waitrq = curtime; 1300f2bd4627Sjohansen } else { 1301f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled()); 1302f2bd4627Sjohansen } 1303f2bd4627Sjohansen 13047c478bd9Sstevel@tonic-gate dp = cp->cpu_disp; 13057c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 13067c478bd9Sstevel@tonic-gate 13077c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 0); 13087c478bd9Sstevel@tonic-gate TRACE_3(TR_FAC_DISP, TR_BACKQ, "setbackdq:pri %d cpu %p tid %p", 1309d129bde2Sesaxe tpri, cp, tp); 13107c478bd9Sstevel@tonic-gate 13117c478bd9Sstevel@tonic-gate #ifndef NPROBE 13127c478bd9Sstevel@tonic-gate /* Kernel probe */ 13137c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 13147c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 13157c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 13167c478bd9Sstevel@tonic-gate 13177c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 13187c478bd9Sstevel@tonic-gate 13197c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 13207c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 13217c478bd9Sstevel@tonic-gate tp->t_link = NULL; 13227c478bd9Sstevel@tonic-gate 13237c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 13247c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 1325685679f7Sakolb if (!bound) 1326685679f7Sakolb dp->disp_steal = 0; 13277c478bd9Sstevel@tonic-gate membar_enter(); 13287c478bd9Sstevel@tonic-gate 13297c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 13307c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL); 13317c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp; 13327c478bd9Sstevel@tonic-gate dq->dq_last = tp; 13337c478bd9Sstevel@tonic-gate } else { 13347c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 13357c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 13367c478bd9Sstevel@tonic-gate 
dq->dq_first = dq->dq_last = tp; 13377c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 13387c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 13397c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 13407c478bd9Sstevel@tonic-gate membar_enter(); 13417c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 13427c478bd9Sstevel@tonic-gate } 13437c478bd9Sstevel@tonic-gate } 13447c478bd9Sstevel@tonic-gate 13457c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) { 13466890d023SEric Saxe if (self && dp->disp_max_unbound_pri == -1 && cp == CPU) { 13477c478bd9Sstevel@tonic-gate /* 13487c478bd9Sstevel@tonic-gate * If there are no other unbound threads on the 13497c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal 13507c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a 13517c478bd9Sstevel@tonic-gate * context switch. We may just switch to it 13527c478bd9Sstevel@tonic-gate * again right away. CPU_DISP_DONTSTEAL is cleared 13537c478bd9Sstevel@tonic-gate * in swtch and swtch_to. 13547c478bd9Sstevel@tonic-gate */ 13557c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 13567c478bd9Sstevel@tonic-gate } 13577c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 13587c478bd9Sstevel@tonic-gate } 13597c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound); 13607c478bd9Sstevel@tonic-gate } 13617c478bd9Sstevel@tonic-gate 13627c478bd9Sstevel@tonic-gate /* 13637c478bd9Sstevel@tonic-gate * Put the specified thread on the front of the dispatcher 13647c478bd9Sstevel@tonic-gate * queue corresponding to its current priority. 13657c478bd9Sstevel@tonic-gate * 13667c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state 13677c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl. 13687c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked. 13697c478bd9Sstevel@tonic-gate */ 13707c478bd9Sstevel@tonic-gate void 13717c478bd9Sstevel@tonic-gate setfrontdq(kthread_t *tp) 13727c478bd9Sstevel@tonic-gate { 13737c478bd9Sstevel@tonic-gate disp_t *dp; 13747c478bd9Sstevel@tonic-gate dispq_t *dq; 13757c478bd9Sstevel@tonic-gate cpu_t *cp; 13767c478bd9Sstevel@tonic-gate pri_t tpri; 13777c478bd9Sstevel@tonic-gate int bound; 13787c478bd9Sstevel@tonic-gate 13797c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 13807c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 13817c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 13827c478bd9Sstevel@tonic-gate 13837c478bd9Sstevel@tonic-gate /* 13847c478bd9Sstevel@tonic-gate * If the thread is "swapped" or on the swap queue, don't 13857c478bd9Sstevel@tonic-gate * queue it, but wake sched.
13867c478bd9Sstevel@tonic-gate */ 13877c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 13887c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp); 13897c478bd9Sstevel@tonic-gate return; 13907c478bd9Sstevel@tonic-gate } 13917c478bd9Sstevel@tonic-gate 1392abd41583Sgd if (tp->t_bound_cpu || tp->t_weakbound_cpu) 1393abd41583Sgd bound = 1; 1394abd41583Sgd else 1395abd41583Sgd bound = 0; 1396abd41583Sgd 13977c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 13987c478bd9Sstevel@tonic-gate if (ncpus == 1) 13997c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 1400abd41583Sgd else if (!bound) { 14017c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) { 14027c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_FRONT); 14037c478bd9Sstevel@tonic-gate return; 14047c478bd9Sstevel@tonic-gate } 14057c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 14067c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) { 14077c478bd9Sstevel@tonic-gate /* 14086890d023SEric Saxe * We'll generally let this thread continue to run 14096890d023SEric Saxe * where it last ran, but will consider migration if: 14106890d023SEric Saxe * - The thread last ran outside its home lgroup. 14116890d023SEric Saxe * - The CPU where it last ran is the target of an 14126890d023SEric Saxe * offline request (a thread_nomigrate() on the in 14136890d023SEric Saxe * motion CPU relies on this when forcing a preempt). 14146890d023SEric Saxe * - The thread isn't the highest priority thread where 14156890d023SEric Saxe * it last ran, and it is considered not likely to 14166890d023SEric Saxe * have significant cache warmth. 14177c478bd9Sstevel@tonic-gate */ 14186890d023SEric Saxe if ((!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, cp)) || 14196890d023SEric Saxe (cp == cpu_inmotion)) { 14206890d023SEric Saxe cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, 14216890d023SEric Saxe (tp == curthread) ? cp : NULL); 14226890d023SEric Saxe } else if ((tpri < cp->cpu_disp->disp_maxrunpri) && 14236890d023SEric Saxe (!THREAD_HAS_CACHE_WARMTH(tp))) { 14246890d023SEric Saxe cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, 14256890d023SEric Saxe NULL); 14266890d023SEric Saxe } 14277c478bd9Sstevel@tonic-gate } else { 14287c478bd9Sstevel@tonic-gate /* 14297c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition. 14307c478bd9Sstevel@tonic-gate */ 14317c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 14327c478bd9Sstevel@tonic-gate tp->t_lpl, tp->t_pri, NULL); 14337c478bd9Sstevel@tonic-gate } 14347c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 14357c478bd9Sstevel@tonic-gate } else { 14367c478bd9Sstevel@tonic-gate /* 14377c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for 14387c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the 14397c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must 14407c478bd9Sstevel@tonic-gate * favour weak binding over strong. 14417c478bd9Sstevel@tonic-gate */ 14427c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ? 14437c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu; 14447c478bd9Sstevel@tonic-gate } 1445f2bd4627Sjohansen 1446f2bd4627Sjohansen /* 1447f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue 1448f2bd4627Sjohansen * but then chosen to run again by disp.
If the thread we're placing on 1449f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1450f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this 1451f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC 1452f2bd4627Sjohansen * state. 1453f2bd4627Sjohansen */ 1454f2bd4627Sjohansen if ((tp != curthread) && (tp->t_waitrq == 0)) { 1455f2bd4627Sjohansen hrtime_t curtime; 1456f2bd4627Sjohansen 1457f2bd4627Sjohansen curtime = gethrtime_unscaled(); 1458f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime); 1459f2bd4627Sjohansen tp->t_waitrq = curtime; 1460f2bd4627Sjohansen } else { 1461f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled()); 1462f2bd4627Sjohansen } 1463f2bd4627Sjohansen 14647c478bd9Sstevel@tonic-gate dp = cp->cpu_disp; 14657c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 14667c478bd9Sstevel@tonic-gate 14677c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 14687c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 1); 14697c478bd9Sstevel@tonic-gate 14707c478bd9Sstevel@tonic-gate #ifndef NPROBE 14717c478bd9Sstevel@tonic-gate /* Kernel probe */ 14727c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 14737c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 14747c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 14757c478bd9Sstevel@tonic-gate 14767c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 14777c478bd9Sstevel@tonic-gate 14787c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set TS_RUN state and lock */ 14797c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 14807c478bd9Sstevel@tonic-gate 14817c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 14827c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 1483685679f7Sakolb if (!bound) 1484685679f7Sakolb dp->disp_steal = 0; 14857c478bd9Sstevel@tonic-gate membar_enter(); 14867c478bd9Sstevel@tonic-gate 14877c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 14887c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL); 14897c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first; 14907c478bd9Sstevel@tonic-gate dq->dq_first = tp; 14917c478bd9Sstevel@tonic-gate } else { 14927c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 14937c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 14947c478bd9Sstevel@tonic-gate tp->t_link = NULL; 14957c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 14967c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 14977c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 14987c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 14997c478bd9Sstevel@tonic-gate membar_enter(); 15007c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 15017c478bd9Sstevel@tonic-gate } 15027c478bd9Sstevel@tonic-gate } 15037c478bd9Sstevel@tonic-gate 15047c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) { 15057c478bd9Sstevel@tonic-gate if (tp == curthread && dp->disp_max_unbound_pri == -1 && 15067c478bd9Sstevel@tonic-gate cp == CPU) { 15077c478bd9Sstevel@tonic-gate /* 15087c478bd9Sstevel@tonic-gate * If there are no other unbound threads on the 15097c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal 15107c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a 15117c478bd9Sstevel@tonic-gate * context switch. We may just switch to it 15127c478bd9Sstevel@tonic-gate * again right away. 
CPU_DISP_DONTSTEAL is cleared 15137c478bd9Sstevel@tonic-gate * in swtch and swtch_to. 15147c478bd9Sstevel@tonic-gate */ 15157c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 15167c478bd9Sstevel@tonic-gate } 15177c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 15187c478bd9Sstevel@tonic-gate } 15197c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound); 15207c478bd9Sstevel@tonic-gate } 15217c478bd9Sstevel@tonic-gate 15227c478bd9Sstevel@tonic-gate /* 15237c478bd9Sstevel@tonic-gate * Put a high-priority unbound thread on the kp queue 15247c478bd9Sstevel@tonic-gate */ 15257c478bd9Sstevel@tonic-gate static void 15267c478bd9Sstevel@tonic-gate setkpdq(kthread_t *tp, int borf) 15277c478bd9Sstevel@tonic-gate { 15287c478bd9Sstevel@tonic-gate dispq_t *dq; 15297c478bd9Sstevel@tonic-gate disp_t *dp; 15307c478bd9Sstevel@tonic-gate cpu_t *cp; 15317c478bd9Sstevel@tonic-gate pri_t tpri; 15327c478bd9Sstevel@tonic-gate 15337c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 15347c478bd9Sstevel@tonic-gate 15357c478bd9Sstevel@tonic-gate dp = &tp->t_cpupart->cp_kp_queue; 15367c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 15377c478bd9Sstevel@tonic-gate 15387c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 15397c478bd9Sstevel@tonic-gate 15407c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 15417c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, borf); 15427c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 15437c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 15447c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 15457c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 15467c478bd9Sstevel@tonic-gate 15477c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 15487c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) { 15497c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL); 15507c478bd9Sstevel@tonic-gate tp->t_link = NULL; 15517c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp; 15527c478bd9Sstevel@tonic-gate dq->dq_last = tp; 15537c478bd9Sstevel@tonic-gate } else { 15547c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL); 15557c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first; 15567c478bd9Sstevel@tonic-gate dq->dq_first = tp; 15577c478bd9Sstevel@tonic-gate } 15587c478bd9Sstevel@tonic-gate } else { 15597c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) { 15607c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 15617c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 15627c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 15637c478bd9Sstevel@tonic-gate } else { 15647c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 15657c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 15667c478bd9Sstevel@tonic-gate tp->t_link = NULL; 15677c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 15687c478bd9Sstevel@tonic-gate } 15697c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 15707c478bd9Sstevel@tonic-gate if (tpri > dp->disp_max_unbound_pri) 15717c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 15727c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 15737c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 15747c478bd9Sstevel@tonic-gate membar_enter(); 15757c478bd9Sstevel@tonic-gate } 15767c478bd9Sstevel@tonic-gate } 15777c478bd9Sstevel@tonic-gate 15787c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 15797c478bd9Sstevel@tonic-gate if (tp->t_cpupart != cp->cpu_part) { 15807c478bd9Sstevel@tonic-gate /* 
migrate to a cpu in the new partition */ 15817c478bd9Sstevel@tonic-gate cp = tp->t_cpupart->cp_cpulist; 15827c478bd9Sstevel@tonic-gate } 15837c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(cp, tp->t_lpl, tp->t_pri, NULL); 15847c478bd9Sstevel@tonic-gate disp_lock_enter_high(&cp->cpu_disp->disp_lock); 15857c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 15867c478bd9Sstevel@tonic-gate 15877c478bd9Sstevel@tonic-gate #ifndef NPROBE 15887c478bd9Sstevel@tonic-gate /* Kernel probe */ 15897c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 15907c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 15917c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 15927c478bd9Sstevel@tonic-gate 15937c478bd9Sstevel@tonic-gate if (cp->cpu_chosen_level < tpri) 15947c478bd9Sstevel@tonic-gate cp->cpu_chosen_level = tpri; 15957c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 15967c478bd9Sstevel@tonic-gate disp_lock_exit_high(&cp->cpu_disp->disp_lock); 15977c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, 0); 15987c478bd9Sstevel@tonic-gate } 15997c478bd9Sstevel@tonic-gate 16007c478bd9Sstevel@tonic-gate /* 16017c478bd9Sstevel@tonic-gate * Remove a thread from the dispatcher queue if it is on it. 16027c478bd9Sstevel@tonic-gate * It is not an error if it is not found, but we return whether 16037c478bd9Sstevel@tonic-gate * or not it was found in case the caller wants to check. 16047c478bd9Sstevel@tonic-gate */ 16057c478bd9Sstevel@tonic-gate int 16067c478bd9Sstevel@tonic-gate dispdeq(kthread_t *tp) 16077c478bd9Sstevel@tonic-gate { 16087c478bd9Sstevel@tonic-gate disp_t *dp; 16097c478bd9Sstevel@tonic-gate dispq_t *dq; 16107c478bd9Sstevel@tonic-gate kthread_t *rp; 16117c478bd9Sstevel@tonic-gate kthread_t *trp; 16127c478bd9Sstevel@tonic-gate kthread_t **ptp; 16137c478bd9Sstevel@tonic-gate int tpri; 16147c478bd9Sstevel@tonic-gate 16157c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 16167c478bd9Sstevel@tonic-gate 16177c478bd9Sstevel@tonic-gate if (tp->t_state != TS_RUN) 16187c478bd9Sstevel@tonic-gate return (0); 16197c478bd9Sstevel@tonic-gate 16207c478bd9Sstevel@tonic-gate /* 16217c478bd9Sstevel@tonic-gate * The thread is "swapped" or is on the swap queue and 16227c478bd9Sstevel@tonic-gate * hence no longer on the run queue, so return true. 16237c478bd9Sstevel@tonic-gate */ 16247c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) 16257c478bd9Sstevel@tonic-gate return (1); 16267c478bd9Sstevel@tonic-gate 16277c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 16287c478bd9Sstevel@tonic-gate dp = tp->t_disp_queue; 16297c478bd9Sstevel@tonic-gate ASSERT(tpri < dp->disp_npri); 16307c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 16317c478bd9Sstevel@tonic-gate ptp = &dq->dq_first; 16327c478bd9Sstevel@tonic-gate rp = *ptp; 16337c478bd9Sstevel@tonic-gate trp = NULL; 16347c478bd9Sstevel@tonic-gate 16357c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL); 16367c478bd9Sstevel@tonic-gate 16377c478bd9Sstevel@tonic-gate /* 16387c478bd9Sstevel@tonic-gate * Search for thread in queue. 16397c478bd9Sstevel@tonic-gate * Double links would simplify this at the expense of disp/setrun.
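 * (The scan below keeps trp one link behind rp so that the
 * predecessor's t_link, or dq_first via ptp, can be repointed once
 * the thread is found.)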
16407c478bd9Sstevel@tonic-gate */ 16417c478bd9Sstevel@tonic-gate while (rp != tp && rp != NULL) { 16427c478bd9Sstevel@tonic-gate trp = rp; 16437c478bd9Sstevel@tonic-gate ptp = &trp->t_link; 16447c478bd9Sstevel@tonic-gate rp = trp->t_link; 16457c478bd9Sstevel@tonic-gate } 16467c478bd9Sstevel@tonic-gate 16477c478bd9Sstevel@tonic-gate if (rp == NULL) { 16487c478bd9Sstevel@tonic-gate panic("dispdeq: thread not on queue"); 16497c478bd9Sstevel@tonic-gate } 16507c478bd9Sstevel@tonic-gate 16517c478bd9Sstevel@tonic-gate DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp); 16527c478bd9Sstevel@tonic-gate 16537c478bd9Sstevel@tonic-gate /* 16547c478bd9Sstevel@tonic-gate * Found it so remove it from queue. 16557c478bd9Sstevel@tonic-gate */ 16567c478bd9Sstevel@tonic-gate if ((*ptp = rp->t_link) == NULL) 16577c478bd9Sstevel@tonic-gate dq->dq_last = trp; 16587c478bd9Sstevel@tonic-gate 16597c478bd9Sstevel@tonic-gate dp->disp_nrunnable--; 16607c478bd9Sstevel@tonic-gate if (--dq->dq_sruncnt == 0) { 16617c478bd9Sstevel@tonic-gate dp->disp_qactmap[tpri >> BT_ULSHIFT] &= ~BT_BIW(tpri); 16627c478bd9Sstevel@tonic-gate if (dp->disp_nrunnable == 0) { 16637c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = -1; 16647c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = -1; 16657c478bd9Sstevel@tonic-gate } else if (tpri == dp->disp_maxrunpri) { 16667c478bd9Sstevel@tonic-gate int ipri; 16677c478bd9Sstevel@tonic-gate 16687c478bd9Sstevel@tonic-gate ipri = bt_gethighbit(dp->disp_qactmap, 16697c478bd9Sstevel@tonic-gate dp->disp_maxrunpri >> BT_ULSHIFT); 16707c478bd9Sstevel@tonic-gate if (ipri < dp->disp_max_unbound_pri) 16717c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = ipri; 16727c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = ipri; 16737c478bd9Sstevel@tonic-gate } 16747c478bd9Sstevel@tonic-gate } 16757c478bd9Sstevel@tonic-gate tp->t_link = NULL; 16767c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); /* put in intermediate state */ 16777c478bd9Sstevel@tonic-gate return (1); 16787c478bd9Sstevel@tonic-gate } 16797c478bd9Sstevel@tonic-gate 16807c478bd9Sstevel@tonic-gate 16817c478bd9Sstevel@tonic-gate /* 16827c478bd9Sstevel@tonic-gate * dq_sruninc and dq_srundec are public functions for 16837c478bd9Sstevel@tonic-gate * incrementing/decrementing the sruncnts when a thread on 16847c478bd9Sstevel@tonic-gate * a dispatcher queue is made schedulable/unschedulable by 16857c478bd9Sstevel@tonic-gate * resetting the TS_LOAD flag. 16867c478bd9Sstevel@tonic-gate * 16877c478bd9Sstevel@tonic-gate * The caller MUST have the thread lock and therefore the dispatcher 16887c478bd9Sstevel@tonic-gate * queue lock so that the operation which changes 16897c478bd9Sstevel@tonic-gate * the flag, the operation that checks the status of the thread to 16907c478bd9Sstevel@tonic-gate * determine if it's on a disp queue AND the call to this function 16917c478bd9Sstevel@tonic-gate * are one atomic operation with respect to interrupts. 16927c478bd9Sstevel@tonic-gate */ 16937c478bd9Sstevel@tonic-gate 16947c478bd9Sstevel@tonic-gate /* 16957c478bd9Sstevel@tonic-gate * Called by sched AFTER TS_LOAD flag is set on a swapped, runnable thread. 
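 * For example (informal pairing, per the calling conventions here):
 * the swapper sets TS_LOAD and then calls dq_sruninc() when bringing
 * a runnable thread back in, and calls dq_srundec() just before
 * clearing TS_LOAD on the way out.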
16967c478bd9Sstevel@tonic-gate */ 16977c478bd9Sstevel@tonic-gate void 16987c478bd9Sstevel@tonic-gate dq_sruninc(kthread_t *t) 16997c478bd9Sstevel@tonic-gate { 17007c478bd9Sstevel@tonic-gate ASSERT(t->t_state == TS_RUN); 17017c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD); 17027c478bd9Sstevel@tonic-gate 17037c478bd9Sstevel@tonic-gate THREAD_TRANSITION(t); 17047c478bd9Sstevel@tonic-gate setfrontdq(t); 17057c478bd9Sstevel@tonic-gate } 17067c478bd9Sstevel@tonic-gate 17077c478bd9Sstevel@tonic-gate /* 17087c478bd9Sstevel@tonic-gate * See comment on calling conventions above. 17097c478bd9Sstevel@tonic-gate * Called by sched BEFORE TS_LOAD flag is cleared on a runnable thread. 17107c478bd9Sstevel@tonic-gate */ 17117c478bd9Sstevel@tonic-gate void 17127c478bd9Sstevel@tonic-gate dq_srundec(kthread_t *t) 17137c478bd9Sstevel@tonic-gate { 17147c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD); 17157c478bd9Sstevel@tonic-gate 17167c478bd9Sstevel@tonic-gate (void) dispdeq(t); 17177c478bd9Sstevel@tonic-gate disp_swapped_enq(t); 17187c478bd9Sstevel@tonic-gate } 17197c478bd9Sstevel@tonic-gate 17207c478bd9Sstevel@tonic-gate /* 17217c478bd9Sstevel@tonic-gate * Change the dispatcher lock of thread to the "swapped_lock" 17227c478bd9Sstevel@tonic-gate * and return with thread lock still held. 17237c478bd9Sstevel@tonic-gate * 17247c478bd9Sstevel@tonic-gate * Called with thread_lock held, in transition state, and at high spl. 17257c478bd9Sstevel@tonic-gate */ 17267c478bd9Sstevel@tonic-gate void 17277c478bd9Sstevel@tonic-gate disp_swapped_enq(kthread_t *tp) 17287c478bd9Sstevel@tonic-gate { 17297c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 17307c478bd9Sstevel@tonic-gate ASSERT(tp->t_schedflag & TS_LOAD); 17317c478bd9Sstevel@tonic-gate 17327c478bd9Sstevel@tonic-gate switch (tp->t_state) { 17337c478bd9Sstevel@tonic-gate case TS_RUN: 17347c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock); 17357c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */ 17367c478bd9Sstevel@tonic-gate break; 17377c478bd9Sstevel@tonic-gate case TS_ONPROC: 17387c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock); 17397c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); 17407c478bd9Sstevel@tonic-gate wake_sched_sec = 1; /* tell clock to wake sched */ 17417c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */ 17427c478bd9Sstevel@tonic-gate break; 17437c478bd9Sstevel@tonic-gate default: 17447c478bd9Sstevel@tonic-gate panic("disp_swapped: tp: %p bad t_state", (void *)tp); 17457c478bd9Sstevel@tonic-gate } 17467c478bd9Sstevel@tonic-gate } 17477c478bd9Sstevel@tonic-gate 17487c478bd9Sstevel@tonic-gate /* 17497c478bd9Sstevel@tonic-gate * This routine is called by setbackdq/setfrontdq if the thread is 17507c478bd9Sstevel@tonic-gate * not loaded, or is loaded and on the swap queue. 17517c478bd9Sstevel@tonic-gate * 17527c478bd9Sstevel@tonic-gate * Thread state TS_SLEEP implies that a swapped thread 17537c478bd9Sstevel@tonic-gate * has been woken up and needs to be swapped in by the swapper. 17547c478bd9Sstevel@tonic-gate * 17557c478bd9Sstevel@tonic-gate * Thread state TS_RUN implies that the priority of a swapped 17567c478bd9Sstevel@tonic-gate * thread is being increased by its scheduling class (e.g. ts_update).
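 * For example (informal): a swapped-out thread woken at a priority
 * above maxclsyspri sets wake_sched, kicking the swapper at the next
 * clock tick; lower-priority wakeups set wake_sched_sec and may wait
 * up to a second.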
17577c478bd9Sstevel@tonic-gate */ 17587c478bd9Sstevel@tonic-gate static void 17597c478bd9Sstevel@tonic-gate disp_swapped_setrun(kthread_t *tp) 17607c478bd9Sstevel@tonic-gate { 17617c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 17627c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD); 17637c478bd9Sstevel@tonic-gate 17647c478bd9Sstevel@tonic-gate switch (tp->t_state) { 17657c478bd9Sstevel@tonic-gate case TS_SLEEP: 17667c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock); 17677c478bd9Sstevel@tonic-gate /* 17687c478bd9Sstevel@tonic-gate * Wakeup sched immediately (i.e., next tick) if the 17697c478bd9Sstevel@tonic-gate * thread priority is above maxclsyspri. 17707c478bd9Sstevel@tonic-gate */ 17717c478bd9Sstevel@tonic-gate if (DISP_PRIO(tp) > maxclsyspri) 17727c478bd9Sstevel@tonic-gate wake_sched = 1; 17737c478bd9Sstevel@tonic-gate else 17747c478bd9Sstevel@tonic-gate wake_sched_sec = 1; 17757c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &swapped_lock); /* set TS_RUN state and lock */ 17767c478bd9Sstevel@tonic-gate break; 17777c478bd9Sstevel@tonic-gate case TS_RUN: /* called from ts_update */ 17787c478bd9Sstevel@tonic-gate break; 17797c478bd9Sstevel@tonic-gate default: 17808793b36bSNick Todd panic("disp_swapped_setrun: tp: %p bad t_state", (void *)tp); 17817c478bd9Sstevel@tonic-gate } 17827c478bd9Sstevel@tonic-gate } 17837c478bd9Sstevel@tonic-gate 17847c478bd9Sstevel@tonic-gate /* 17857c478bd9Sstevel@tonic-gate * Make a thread give up its processor. Find the processor on 17867c478bd9Sstevel@tonic-gate * which this thread is executing, and have that processor 17877c478bd9Sstevel@tonic-gate * preempt. 178835a5a358SJonathan Adams * 178935a5a358SJonathan Adams * We allow System Duty Cycle (SDC) threads to be preempted even if 179035a5a358SJonathan Adams * they are running at kernel priorities. To implement this, we always 179135a5a358SJonathan Adams * set cpu_kprunrun; this ensures preempt() will be called. Since SDC 179235a5a358SJonathan Adams * calls cpu_surrender() very often, we only preempt if there is anyone 179335a5a358SJonathan Adams * competing with us. 
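 *
 * Informally, for an SDC thread the code below reduces to this sketch:
 *
 *	if (DISP_PRIO(tp) > max(cpup->cpu_disp->disp_maxrunpri,
 *	    CP_MAXRUNPRI(cpup->cpu_part)))
 *		return;
 *	cpup->cpu_runrun = cpup->cpu_kprunrun = 1;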
17947c478bd9Sstevel@tonic-gate */ 17957c478bd9Sstevel@tonic-gate void 17967c478bd9Sstevel@tonic-gate cpu_surrender(kthread_t *tp) 17977c478bd9Sstevel@tonic-gate { 17987c478bd9Sstevel@tonic-gate cpu_t *cpup; 17997c478bd9Sstevel@tonic-gate int max_pri; 18007c478bd9Sstevel@tonic-gate int max_run_pri; 18017c478bd9Sstevel@tonic-gate klwp_t *lwp; 18027c478bd9Sstevel@tonic-gate 18037c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 18047c478bd9Sstevel@tonic-gate 18057c478bd9Sstevel@tonic-gate if (tp->t_state != TS_ONPROC) 18067c478bd9Sstevel@tonic-gate return; 18077c478bd9Sstevel@tonic-gate cpup = tp->t_disp_queue->disp_cpu; /* CPU thread dispatched to */ 18087c478bd9Sstevel@tonic-gate max_pri = cpup->cpu_disp->disp_maxrunpri; /* best pri of that CPU */ 18097c478bd9Sstevel@tonic-gate max_run_pri = CP_MAXRUNPRI(cpup->cpu_part); 18107c478bd9Sstevel@tonic-gate if (max_pri < max_run_pri) 18117c478bd9Sstevel@tonic-gate max_pri = max_run_pri; 18127c478bd9Sstevel@tonic-gate 181335a5a358SJonathan Adams if (tp->t_cid == sysdccid) { 181435a5a358SJonathan Adams uint_t t_pri = DISP_PRIO(tp); 181535a5a358SJonathan Adams if (t_pri > max_pri) 181635a5a358SJonathan Adams return; /* we are not competing w/ anyone */ 181735a5a358SJonathan Adams cpup->cpu_runrun = cpup->cpu_kprunrun = 1; 181835a5a358SJonathan Adams } else { 181935a5a358SJonathan Adams cpup->cpu_runrun = 1; 182035a5a358SJonathan Adams if (max_pri >= kpreemptpri && cpup->cpu_kprunrun == 0) { 182135a5a358SJonathan Adams cpup->cpu_kprunrun = 1; 182235a5a358SJonathan Adams } 18237c478bd9Sstevel@tonic-gate } 18247c478bd9Sstevel@tonic-gate 18257c478bd9Sstevel@tonic-gate /* 18267c478bd9Sstevel@tonic-gate * Propagate cpu_runrun and cpu_kprunrun to global visibility. 18277c478bd9Sstevel@tonic-gate */ 18287c478bd9Sstevel@tonic-gate membar_enter(); 18297c478bd9Sstevel@tonic-gate 18307c478bd9Sstevel@tonic-gate DTRACE_SCHED1(surrender, kthread_t *, tp); 18317c478bd9Sstevel@tonic-gate 18327c478bd9Sstevel@tonic-gate /* 18337c478bd9Sstevel@tonic-gate * Make the target thread take an excursion through trap() 18347c478bd9Sstevel@tonic-gate * to do preempt() (unless we're already in trap or post_syscall, 18357c478bd9Sstevel@tonic-gate * calling cpu_surrender via CL_TRAPRET).
18367c478bd9Sstevel@tonic-gate */ 18377c478bd9Sstevel@tonic-gate if (tp != curthread || (lwp = tp->t_lwp) == NULL || 18387c478bd9Sstevel@tonic-gate lwp->lwp_state != LWP_USER) { 18397c478bd9Sstevel@tonic-gate aston(tp); 18407c478bd9Sstevel@tonic-gate if (cpup != CPU) 18417c478bd9Sstevel@tonic-gate poke_cpu(cpup->cpu_id); 18427c478bd9Sstevel@tonic-gate } 18437c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_CPU_SURRENDER, 18447c478bd9Sstevel@tonic-gate "cpu_surrender:tid %p cpu %p", tp, cpup); 18457c478bd9Sstevel@tonic-gate } 18467c478bd9Sstevel@tonic-gate 18477c478bd9Sstevel@tonic-gate /* 18487c478bd9Sstevel@tonic-gate * Commit to and ratify a scheduling decision 18497c478bd9Sstevel@tonic-gate */ 18507c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 18517c478bd9Sstevel@tonic-gate static kthread_t * 18527c478bd9Sstevel@tonic-gate disp_ratify(kthread_t *tp, disp_t *kpq) 18537c478bd9Sstevel@tonic-gate { 18547c478bd9Sstevel@tonic-gate pri_t tpri, maxpri; 18557c478bd9Sstevel@tonic-gate pri_t maxkpri; 18567c478bd9Sstevel@tonic-gate cpu_t *cpup; 18577c478bd9Sstevel@tonic-gate 18587c478bd9Sstevel@tonic-gate ASSERT(tp != NULL); 18597c478bd9Sstevel@tonic-gate /* 18607c478bd9Sstevel@tonic-gate * Commit to, then ratify scheduling decision 18617c478bd9Sstevel@tonic-gate */ 18627c478bd9Sstevel@tonic-gate cpup = CPU; 18637c478bd9Sstevel@tonic-gate if (cpup->cpu_runrun != 0) 18647c478bd9Sstevel@tonic-gate cpup->cpu_runrun = 0; 18657c478bd9Sstevel@tonic-gate if (cpup->cpu_kprunrun != 0) 18667c478bd9Sstevel@tonic-gate cpup->cpu_kprunrun = 0; 18677c478bd9Sstevel@tonic-gate if (cpup->cpu_chosen_level != -1) 18687c478bd9Sstevel@tonic-gate cpup->cpu_chosen_level = -1; 18697c478bd9Sstevel@tonic-gate membar_enter(); 18707c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 18717c478bd9Sstevel@tonic-gate maxpri = cpup->cpu_disp->disp_maxrunpri; 18727c478bd9Sstevel@tonic-gate maxkpri = kpq->disp_maxrunpri; 18737c478bd9Sstevel@tonic-gate if (maxpri < maxkpri) 18747c478bd9Sstevel@tonic-gate maxpri = maxkpri; 18757c478bd9Sstevel@tonic-gate if (tpri < maxpri) { 18767c478bd9Sstevel@tonic-gate /* 18777c478bd9Sstevel@tonic-gate * should have done better; 18787c478bd9Sstevel@tonic-gate * put this one back and indicate to try again 18797c478bd9Sstevel@tonic-gate */ 18807c478bd9Sstevel@tonic-gate cpup->cpu_dispthread = curthread; /* fixup dispthread */ 18817c478bd9Sstevel@tonic-gate cpup->cpu_dispatch_pri = DISP_PRIO(curthread); 18827c478bd9Sstevel@tonic-gate thread_lock_high(tp); 18837c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); 18847c478bd9Sstevel@tonic-gate setfrontdq(tp); 18857c478bd9Sstevel@tonic-gate thread_unlock_nopreempt(tp); 18867c478bd9Sstevel@tonic-gate 18877c478bd9Sstevel@tonic-gate tp = NULL; 18887c478bd9Sstevel@tonic-gate } 18897c478bd9Sstevel@tonic-gate return (tp); 18907c478bd9Sstevel@tonic-gate } 18917c478bd9Sstevel@tonic-gate 18927c478bd9Sstevel@tonic-gate /* 18937c478bd9Sstevel@tonic-gate * See if there is any work on the dispatcher queue for other CPUs. 18947c478bd9Sstevel@tonic-gate * If there is, dequeue the best thread and return.
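 * Returns the thread to run, NULL when nothing suitable was found, or
 * T_DONTSTEAL when the only candidates were enqueued too recently to
 * be stolen (so the idle loop should retry later).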
18957c478bd9Sstevel@tonic-gate */ 18967c478bd9Sstevel@tonic-gate static kthread_t * 18977c478bd9Sstevel@tonic-gate disp_getwork(cpu_t *cp) 18987c478bd9Sstevel@tonic-gate { 18997c478bd9Sstevel@tonic-gate cpu_t *ocp; /* other CPU */ 19007c478bd9Sstevel@tonic-gate cpu_t *ocp_start; 19017c478bd9Sstevel@tonic-gate cpu_t *tcp; /* target local CPU */ 19027c478bd9Sstevel@tonic-gate kthread_t *tp; 1903685679f7Sakolb kthread_t *retval = NULL; 19047c478bd9Sstevel@tonic-gate pri_t maxpri; 19057c478bd9Sstevel@tonic-gate disp_t *kpq; /* kp queue for this partition */ 19067c478bd9Sstevel@tonic-gate lpl_t *lpl, *lpl_leaf; 19076890d023SEric Saxe int leafidx, startidx; 1908685679f7Sakolb hrtime_t stealtime; 19096890d023SEric Saxe lgrp_id_t local_id; 19107c478bd9Sstevel@tonic-gate 19117c478bd9Sstevel@tonic-gate maxpri = -1; 19127c478bd9Sstevel@tonic-gate tcp = NULL; 19137c478bd9Sstevel@tonic-gate 19147c478bd9Sstevel@tonic-gate kpq = &cp->cpu_part->cp_kp_queue; 19157c478bd9Sstevel@tonic-gate while (kpq->disp_maxrunpri >= 0) { 19167c478bd9Sstevel@tonic-gate /* 19177c478bd9Sstevel@tonic-gate * Try to take a thread from the kp_queue. 19187c478bd9Sstevel@tonic-gate */ 19197c478bd9Sstevel@tonic-gate tp = (disp_getbest(kpq)); 19207c478bd9Sstevel@tonic-gate if (tp) 19217c478bd9Sstevel@tonic-gate return (disp_ratify(tp, kpq)); 19227c478bd9Sstevel@tonic-gate } 19237c478bd9Sstevel@tonic-gate 1924ab761399Sesaxe kpreempt_disable(); /* protect the cpu_active list */ 19257c478bd9Sstevel@tonic-gate 19267c478bd9Sstevel@tonic-gate /* 19277c478bd9Sstevel@tonic-gate * Try to find something to do on another CPU's run queue. 19287c478bd9Sstevel@tonic-gate * Loop through all other CPUs looking for the one with the highest 19297c478bd9Sstevel@tonic-gate * priority unbound thread. 19307c478bd9Sstevel@tonic-gate * 19317c478bd9Sstevel@tonic-gate * On NUMA machines, the partition's CPUs are consulted in order of 19327c478bd9Sstevel@tonic-gate * distance from the current CPU. This way, the first available 19337c478bd9Sstevel@tonic-gate * work found is also the closest, and will suffer the least 19347c478bd9Sstevel@tonic-gate * from being migrated. 19357c478bd9Sstevel@tonic-gate */ 19367c478bd9Sstevel@tonic-gate lpl = lpl_leaf = cp->cpu_lpl; 19376890d023SEric Saxe local_id = lpl_leaf->lpl_lgrpid; 19386890d023SEric Saxe leafidx = startidx = 0; 19397c478bd9Sstevel@tonic-gate 19407c478bd9Sstevel@tonic-gate /* 19417c478bd9Sstevel@tonic-gate * This loop traverses the lpl hierarchy. 
Higher level lpls represent 19427c478bd9Sstevel@tonic-gate * broader levels of locality 19437c478bd9Sstevel@tonic-gate */ 19447c478bd9Sstevel@tonic-gate do { 19457c478bd9Sstevel@tonic-gate /* This loop iterates over the lpl's leaves */ 19467c478bd9Sstevel@tonic-gate do { 19477c478bd9Sstevel@tonic-gate if (lpl_leaf != cp->cpu_lpl) 19487c478bd9Sstevel@tonic-gate ocp = lpl_leaf->lpl_cpus; 19497c478bd9Sstevel@tonic-gate else 19507c478bd9Sstevel@tonic-gate ocp = cp->cpu_next_lpl; 19517c478bd9Sstevel@tonic-gate 19527c478bd9Sstevel@tonic-gate /* This loop iterates over the CPUs in the leaf */ 19537c478bd9Sstevel@tonic-gate ocp_start = ocp; 19547c478bd9Sstevel@tonic-gate do { 19557c478bd9Sstevel@tonic-gate pri_t pri; 19567c478bd9Sstevel@tonic-gate 19577c478bd9Sstevel@tonic-gate ASSERT(CPU_ACTIVE(ocp)); 19587c478bd9Sstevel@tonic-gate 19597c478bd9Sstevel@tonic-gate /* 196039bac370Sesaxe * End our stroll around this lpl if: 19617c478bd9Sstevel@tonic-gate * 19627c478bd9Sstevel@tonic-gate * - Something became runnable on the local 196339bac370Sesaxe * queue...which also ends our stroll around 196439bac370Sesaxe * the partition. 19657c478bd9Sstevel@tonic-gate * 196639bac370Sesaxe * - We happen across another idle CPU. 196739bac370Sesaxe * Since it is patrolling the next portion 196839bac370Sesaxe * of the lpl's list (assuming it's not 19696890d023SEric Saxe * halted, or busy servicing an interrupt), 19706890d023SEric Saxe * move to the next higher level of locality. 19717c478bd9Sstevel@tonic-gate */ 197239bac370Sesaxe if (cp->cpu_disp->disp_nrunnable != 0) { 197239bac370Sesaxe kpreempt_enable(); 197439bac370Sesaxe return (NULL); 197539bac370Sesaxe } 19767c478bd9Sstevel@tonic-gate if (ocp->cpu_dispatch_pri == -1) { 19777c478bd9Sstevel@tonic-gate if (ocp->cpu_disp_flags & 19786890d023SEric Saxe CPU_DISP_HALTED || 19796890d023SEric Saxe ocp->cpu_intr_actv != 0) 19807c478bd9Sstevel@tonic-gate continue; 198139bac370Sesaxe else 19826890d023SEric Saxe goto next_level; 19837c478bd9Sstevel@tonic-gate } 19847c478bd9Sstevel@tonic-gate 19857c478bd9Sstevel@tonic-gate /* 19867c478bd9Sstevel@tonic-gate * If there's only one thread and the CPU 19877c478bd9Sstevel@tonic-gate * is in the middle of a context switch, 19887c478bd9Sstevel@tonic-gate * or it's currently running the idle thread, 19897c478bd9Sstevel@tonic-gate * don't steal it. 19907c478bd9Sstevel@tonic-gate */ 19917c478bd9Sstevel@tonic-gate if ((ocp->cpu_disp_flags & 1992d129bde2Sesaxe CPU_DISP_DONTSTEAL) && 19937c478bd9Sstevel@tonic-gate ocp->cpu_disp->disp_nrunnable == 1) 19947c478bd9Sstevel@tonic-gate continue; 19957c478bd9Sstevel@tonic-gate 19967c478bd9Sstevel@tonic-gate pri = ocp->cpu_disp->disp_max_unbound_pri; 19977c478bd9Sstevel@tonic-gate if (pri > maxpri) { 1998685679f7Sakolb /* 1999685679f7Sakolb * Don't steal threads that we attempted 2000fb2f18f8Sesaxe * to steal recently until they're ready 2001fb2f18f8Sesaxe * to be stolen again. 2002685679f7Sakolb */ 2003685679f7Sakolb stealtime = ocp->cpu_disp->disp_steal; 2004685679f7Sakolb if (stealtime == 0 || 2005685679f7Sakolb stealtime - gethrtime() <= 0) { 2006685679f7Sakolb maxpri = pri; 2007685679f7Sakolb tcp = ocp; 2008685679f7Sakolb } else { 2009685679f7Sakolb /* 2010685679f7Sakolb * Don't update tcp, just set 2011685679f7Sakolb * the retval to T_DONTSTEAL, so 2012685679f7Sakolb * that if no acceptable CPUs 2013685679f7Sakolb * are found the return value 2014685679f7Sakolb * will be T_DONTSTEAL rather 2015685679f7Sakolb * than NULL.
2016685679f7Sakolb */ 2017685679f7Sakolb retval = T_DONTSTEAL; 2018685679f7Sakolb } 20197c478bd9Sstevel@tonic-gate } 20207c478bd9Sstevel@tonic-gate } while ((ocp = ocp->cpu_next_lpl) != ocp_start); 20217c478bd9Sstevel@tonic-gate 20226890d023SEric Saxe /* 20236890d023SEric Saxe * Iterate to the next leaf lpl in the resource set 20246890d023SEric Saxe * at this level of locality. If we hit the end of 20256890d023SEric Saxe * the set, wrap back around to the beginning. 20266890d023SEric Saxe * 20276890d023SEric Saxe * Note: This iteration is NULL terminated for a reason; 20286890d023SEric Saxe * see lpl_topo_bootstrap() in lgrp.c for details. 20296890d023SEric Saxe */ 20307c478bd9Sstevel@tonic-gate if ((lpl_leaf = lpl->lpl_rset[++leafidx]) == NULL) { 20317c478bd9Sstevel@tonic-gate leafidx = 0; 20327c478bd9Sstevel@tonic-gate lpl_leaf = lpl->lpl_rset[leafidx]; 20337c478bd9Sstevel@tonic-gate } 20346890d023SEric Saxe } while (leafidx != startidx); 20357c478bd9Sstevel@tonic-gate 20366890d023SEric Saxe next_level: 20376890d023SEric Saxe /* 20386890d023SEric Saxe * Expand the search to include farther away CPUs (next 20396890d023SEric Saxe * locality level). The closer CPUs that have already been 20406890d023SEric Saxe * checked will be checked again. In doing so, idle CPUs 20416890d023SEric Saxe * will tend to be more aggressive about stealing from CPUs 20426890d023SEric Saxe * that are closer (since the closer CPUs will be considered 20436890d023SEric Saxe * more often). 20446890d023SEric Saxe * Begin at this level with the CPU's local leaf lpl. 20456890d023SEric Saxe */ 20466890d023SEric Saxe if ((lpl = lpl->lpl_parent) != NULL) { 20476890d023SEric Saxe leafidx = startidx = lpl->lpl_id2rset[local_id]; 20486890d023SEric Saxe lpl_leaf = lpl->lpl_rset[leafidx]; 20496890d023SEric Saxe } 20507c478bd9Sstevel@tonic-gate } while (!tcp && lpl); 20517c478bd9Sstevel@tonic-gate 2052ab761399Sesaxe kpreempt_enable(); 20537c478bd9Sstevel@tonic-gate 20547c478bd9Sstevel@tonic-gate /* 20557c478bd9Sstevel@tonic-gate * If another queue looks good, and there is still nothing on 20567c478bd9Sstevel@tonic-gate * the local queue, try to transfer one or more threads 20577c478bd9Sstevel@tonic-gate * from it to our queue. 20587c478bd9Sstevel@tonic-gate */ 20597c478bd9Sstevel@tonic-gate if (tcp && cp->cpu_disp->disp_nrunnable == 0) { 2060685679f7Sakolb tp = disp_getbest(tcp->cpu_disp); 2061685679f7Sakolb if (tp == NULL || tp == T_DONTSTEAL) 2062685679f7Sakolb return (tp); 2063685679f7Sakolb return (disp_ratify(tp, kpq)); 20647c478bd9Sstevel@tonic-gate } 2065685679f7Sakolb return (retval); 20667c478bd9Sstevel@tonic-gate } 20677c478bd9Sstevel@tonic-gate 20687c478bd9Sstevel@tonic-gate 20697c478bd9Sstevel@tonic-gate /* 20707c478bd9Sstevel@tonic-gate * disp_fix_unbound_pri() 20717c478bd9Sstevel@tonic-gate * Determines the maximum priority of unbound threads on the queue. 20727c478bd9Sstevel@tonic-gate * The priority is kept for the queue, but is only increased, never 20737c478bd9Sstevel@tonic-gate * reduced unless some CPU is looking for something on that queue. 20747c478bd9Sstevel@tonic-gate * 20757c478bd9Sstevel@tonic-gate * The priority argument is the known upper limit. 20767c478bd9Sstevel@tonic-gate * 20777c478bd9Sstevel@tonic-gate * Perhaps this should be kept accurately, but that probably means 20787c478bd9Sstevel@tonic-gate * separate bitmaps for bound and unbound threads. Since only idled 20797c478bd9Sstevel@tonic-gate * CPUs will have to do this recalculation, it seems better this way.
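 * For example (priorities hypothetical): if pri arrives as 37 and the
 * only lower active levels are 20 (all bound) and 12 (one unbound
 * thread), the loop below drops to 20, finds nothing unbound, drops
 * to 12, and leaves disp_max_unbound_pri at 12.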
20807c478bd9Sstevel@tonic-gate */ 20817c478bd9Sstevel@tonic-gate static void 20827c478bd9Sstevel@tonic-gate disp_fix_unbound_pri(disp_t *dp, pri_t pri) 20837c478bd9Sstevel@tonic-gate { 20847c478bd9Sstevel@tonic-gate kthread_t *tp; 20857c478bd9Sstevel@tonic-gate dispq_t *dq; 20867c478bd9Sstevel@tonic-gate ulong_t *dqactmap = dp->disp_qactmap; 20877c478bd9Sstevel@tonic-gate ulong_t mapword; 20887c478bd9Sstevel@tonic-gate int wx; 20897c478bd9Sstevel@tonic-gate 20907c478bd9Sstevel@tonic-gate ASSERT(DISP_LOCK_HELD(&dp->disp_lock)); 20917c478bd9Sstevel@tonic-gate 20927c478bd9Sstevel@tonic-gate ASSERT(pri >= 0); /* checked by caller */ 20937c478bd9Sstevel@tonic-gate 20947c478bd9Sstevel@tonic-gate /* 20957c478bd9Sstevel@tonic-gate * Start the search at the next lowest priority below the supplied 20967c478bd9Sstevel@tonic-gate * priority. This depends on the bitmap implementation. 20977c478bd9Sstevel@tonic-gate */ 20987c478bd9Sstevel@tonic-gate do { 20997c478bd9Sstevel@tonic-gate wx = pri >> BT_ULSHIFT; /* index of word in map */ 21007c478bd9Sstevel@tonic-gate 21017c478bd9Sstevel@tonic-gate /* 21027c478bd9Sstevel@tonic-gate * Form mask for all lower priorities in the word. 21037c478bd9Sstevel@tonic-gate */ 21047c478bd9Sstevel@tonic-gate mapword = dqactmap[wx] & (BT_BIW(pri) - 1); 21057c478bd9Sstevel@tonic-gate 21067c478bd9Sstevel@tonic-gate /* 21077c478bd9Sstevel@tonic-gate * Get next lower active priority. 21087c478bd9Sstevel@tonic-gate */ 21097c478bd9Sstevel@tonic-gate if (mapword != 0) { 21107c478bd9Sstevel@tonic-gate pri = (wx << BT_ULSHIFT) + highbit(mapword) - 1; 21117c478bd9Sstevel@tonic-gate } else if (wx > 0) { 21127c478bd9Sstevel@tonic-gate pri = bt_gethighbit(dqactmap, wx - 1); /* sign extend */ 21137c478bd9Sstevel@tonic-gate if (pri < 0) 21147c478bd9Sstevel@tonic-gate break; 21157c478bd9Sstevel@tonic-gate } else { 21167c478bd9Sstevel@tonic-gate pri = -1; 21177c478bd9Sstevel@tonic-gate break; 21187c478bd9Sstevel@tonic-gate } 21197c478bd9Sstevel@tonic-gate 21207c478bd9Sstevel@tonic-gate /* 21217c478bd9Sstevel@tonic-gate * Search the queue for unbound, runnable threads. 21227c478bd9Sstevel@tonic-gate */ 21237c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri]; 21247c478bd9Sstevel@tonic-gate tp = dq->dq_first; 21257c478bd9Sstevel@tonic-gate 21267c478bd9Sstevel@tonic-gate while (tp && (tp->t_bound_cpu || tp->t_weakbound_cpu)) { 21277c478bd9Sstevel@tonic-gate tp = tp->t_link; 21287c478bd9Sstevel@tonic-gate } 21297c478bd9Sstevel@tonic-gate 21307c478bd9Sstevel@tonic-gate /* 21317c478bd9Sstevel@tonic-gate * If a thread was found, set the priority and return. 21327c478bd9Sstevel@tonic-gate */ 21337c478bd9Sstevel@tonic-gate } while (tp == NULL); 21347c478bd9Sstevel@tonic-gate 21357c478bd9Sstevel@tonic-gate /* 21367c478bd9Sstevel@tonic-gate * pri holds the maximum unbound thread priority or -1. 21377c478bd9Sstevel@tonic-gate */ 21387c478bd9Sstevel@tonic-gate if (dp->disp_max_unbound_pri != pri) 21397c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = pri; 21407c478bd9Sstevel@tonic-gate } 21417c478bd9Sstevel@tonic-gate 21427c478bd9Sstevel@tonic-gate /* 21437c478bd9Sstevel@tonic-gate * disp_adjust_unbound_pri() - the thread is becoming unbound, so we should 21447c478bd9Sstevel@tonic-gate * check if the CPU to which it was previously bound should have 21457c478bd9Sstevel@tonic-gate * its disp_max_unbound_pri increased.
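 * For example (informal): a priority-50 thread hard-bound to a CPU is
 * not counted in that CPU's disp_max_unbound_pri while it waits; if
 * the binding is dropped, this routine raises the value to 50 so that
 * idle CPUs will consider stealing the thread.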
21467c478bd9Sstevel@tonic-gate */ 21477c478bd9Sstevel@tonic-gate void 21487c478bd9Sstevel@tonic-gate disp_adjust_unbound_pri(kthread_t *tp) 21497c478bd9Sstevel@tonic-gate { 21507c478bd9Sstevel@tonic-gate disp_t *dp; 21517c478bd9Sstevel@tonic-gate pri_t tpri; 21527c478bd9Sstevel@tonic-gate 21537c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 21547c478bd9Sstevel@tonic-gate 21557c478bd9Sstevel@tonic-gate /* 21567c478bd9Sstevel@tonic-gate * Don't do anything if the thread is not bound, or 21577c478bd9Sstevel@tonic-gate * currently not runnable or swapped out. 21587c478bd9Sstevel@tonic-gate */ 21597c478bd9Sstevel@tonic-gate if (tp->t_bound_cpu == NULL || 21607c478bd9Sstevel@tonic-gate tp->t_state != TS_RUN || 21617c478bd9Sstevel@tonic-gate tp->t_schedflag & TS_ON_SWAPQ) 21627c478bd9Sstevel@tonic-gate return; 21637c478bd9Sstevel@tonic-gate 21647c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 21657c478bd9Sstevel@tonic-gate dp = tp->t_bound_cpu->cpu_disp; 21667c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 21677c478bd9Sstevel@tonic-gate if (tpri > dp->disp_max_unbound_pri) 21687c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 21697c478bd9Sstevel@tonic-gate } 21707c478bd9Sstevel@tonic-gate 21717c478bd9Sstevel@tonic-gate /* 2172685679f7Sakolb * disp_getbest() 2173685679f7Sakolb * De-queue the highest priority unbound runnable thread. 2174685679f7Sakolb * Returns with the thread unlocked and onproc but at splhigh (like disp()). 2175685679f7Sakolb * Returns NULL if nothing found. 2176685679f7Sakolb * Returns T_DONTSTEAL if the thread was not stealable, 2177685679f7Sakolb * so that the caller will try again later. 21787c478bd9Sstevel@tonic-gate * 2179685679f7Sakolb * Passed a pointer to a dispatch queue not associated with 2180685679f7Sakolb * this CPU. 21817c478bd9Sstevel@tonic-gate */ 21827c478bd9Sstevel@tonic-gate static kthread_t * 21837c478bd9Sstevel@tonic-gate disp_getbest(disp_t *dp) 21847c478bd9Sstevel@tonic-gate { 21857c478bd9Sstevel@tonic-gate kthread_t *tp; 21867c478bd9Sstevel@tonic-gate dispq_t *dq; 21877c478bd9Sstevel@tonic-gate pri_t pri; 2188685679f7Sakolb cpu_t *cp, *tcp; 2189685679f7Sakolb boolean_t allbound; 21907c478bd9Sstevel@tonic-gate 21917c478bd9Sstevel@tonic-gate disp_lock_enter(&dp->disp_lock); 21927c478bd9Sstevel@tonic-gate 21937c478bd9Sstevel@tonic-gate /* 21947c478bd9Sstevel@tonic-gate * If there is nothing to run, or the CPU is in the middle of a 21957c478bd9Sstevel@tonic-gate * context switch of the only thread, return NULL. 21967c478bd9Sstevel@tonic-gate */ 2197685679f7Sakolb tcp = dp->disp_cpu; 2198685679f7Sakolb cp = CPU; 21997c478bd9Sstevel@tonic-gate pri = dp->disp_max_unbound_pri; 22007c478bd9Sstevel@tonic-gate if (pri == -1 || 2201685679f7Sakolb (tcp != NULL && (tcp->cpu_disp_flags & CPU_DISP_DONTSTEAL) && 2202685679f7Sakolb tcp->cpu_disp->disp_nrunnable == 1)) { 22037c478bd9Sstevel@tonic-gate disp_lock_exit_nopreempt(&dp->disp_lock); 22047c478bd9Sstevel@tonic-gate return (NULL); 22057c478bd9Sstevel@tonic-gate } 22067c478bd9Sstevel@tonic-gate 22077c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri]; 2208685679f7Sakolb 22097c478bd9Sstevel@tonic-gate 22107c478bd9Sstevel@tonic-gate /* 2211685679f7Sakolb * Assume that all threads are bound on this queue, and change it 2212685679f7Sakolb * later when we find out that it is not the case.

        /*
         * Assume that all threads are bound on this queue, and change it
         * later when we find out that it is not the case.
         */
        allbound = B_TRUE;
        for (tp = dq->dq_first; tp != NULL; tp = tp->t_link) {
                hrtime_t now, nosteal, rqtime;

                /*
                 * Skip over bound threads which could be here even
                 * though disp_max_unbound_pri indicated this level.
                 */
                if (tp->t_bound_cpu || tp->t_weakbound_cpu)
                        continue;

                /*
                 * We've got some unbound threads on this queue, so turn
                 * the allbound flag off now.
                 */
                allbound = B_FALSE;

                /*
                 * The thread is a candidate for stealing from its run queue.
                 * We don't want to steal threads that became runnable just a
                 * moment ago.  This improves CPU affinity for threads that
                 * get preempted for short periods of time and go back on the
                 * run queue.
                 *
                 * We want to let it stay on its run queue if it was only
                 * placed there recently and it was running on the same CPU
                 * before that, to preserve its cache investment.  For the
                 * thread to remain on its run queue, ALL of the following
                 * conditions must be satisfied:
                 *
                 * - the disp queue should not be the kernel preemption queue
                 * - delayed idle stealing should not be disabled
                 * - nosteal_nsec should be non-zero
                 * - it should run with user priority
                 * - it should be on the run queue of the CPU where it was
                 *   running before being placed on the run queue
                 * - it should be the only thread on the run queue (to prevent
                 *   extra scheduling latency for other threads)
                 * - it should sit on the run queue for less than per-chip
                 *   nosteal interval or global nosteal interval
                 * - in case of CPUs with shared cache it should sit in a run
                 *   queue of a CPU from a different chip
                 *
                 * The checks are arranged so that the ones that are faster
                 * are placed earlier.
                 */
                if (tcp == NULL ||
                    pri >= minclsyspri ||
                    tp->t_cpu != tcp)
                        break;

                /*
                 * Steal immediately if, due to CMT processor architecture,
                 * migration between cp and tcp would incur no performance
                 * penalty.
                 */
                if (pg_cmt_can_migrate(cp, tcp))
                        break;

                nosteal = nosteal_nsec;
                if (nosteal == 0)
                        break;

                /*
                 * Calculate time spent sitting on run queue
                 */
                now = gethrtime_unscaled();
                rqtime = now - tp->t_waitrq;
                scalehrtime(&rqtime);
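
                /*
                 * Illustrative arithmetic (hypothetical numbers, not from
                 * the source): with nosteal = 100us and a thread that has
                 * waited rqtime = 30us, the test below keeps the thread on
                 * its queue, and the code after it records now + 70us in
                 * dp->disp_steal - the earliest moment at which the thread
                 * becomes stealable.
                 */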

                /*
                 * Steal immediately if the time spent on this run queue is
                 * more than allowed nosteal delay.
                 *
                 * Negative rqtime check is needed here to avoid infinite
                 * stealing delays caused by unlikely but not impossible
                 * drifts between CPU times on different CPUs.
                 */
                if (rqtime > nosteal || rqtime < 0)
                        break;

                DTRACE_PROBE4(nosteal, kthread_t *, tp,
                    cpu_t *, tcp, cpu_t *, cp, hrtime_t, rqtime);
                scalehrtime(&now);

                /*
                 * Calculate when this thread becomes stealable
                 */
                now += (nosteal - rqtime);

                /*
                 * Calculate time when some thread becomes stealable
                 */
                if (now < dp->disp_steal)
                        dp->disp_steal = now;
        }

        /*
         * If there were no unbound threads on this queue, find the queue
         * where they are and then return later.  The value of
         * disp_max_unbound_pri is not always accurate because it isn't
         * reduced until another idle CPU looks for work.
         */
        if (allbound)
                disp_fix_unbound_pri(dp, pri);

        /*
         * If we reached the end of the queue and found no unbound threads
         * then return NULL so that other CPUs will be considered.  If there
         * are unbound threads but they cannot yet be stolen, then
         * return T_DONTSTEAL and try again later.
         */
        if (tp == NULL) {
                disp_lock_exit_nopreempt(&dp->disp_lock);
                return (allbound ? NULL : T_DONTSTEAL);
        }

        /*
         * Found a runnable, unbound thread, so remove it from queue.
         * dispdeq() requires that we have the thread locked, and we do,
         * by virtue of holding the dispatch queue lock.  dispdeq() will
         * put the thread in transition state, thereby dropping the dispq
         * lock.
         */

#ifdef DEBUG
        {
                int     thread_was_on_queue;

                thread_was_on_queue = dispdeq(tp);      /* drops disp_lock */
                ASSERT(thread_was_on_queue);
        }

#else /* DEBUG */
        (void) dispdeq(tp);                     /* drops disp_lock */
#endif /* DEBUG */

        /*
         * Reset the disp_queue steal time - we do not know what
         * the smallest value across the queue is.
         */
        dp->disp_steal = 0;

        tp->t_schedflag |= TS_DONT_SWAP;

        /*
         * Setup thread to run on the current CPU.
         */
        tp->t_disp_queue = cp->cpu_disp;

        cp->cpu_dispthread = tp;                /* protected by spl only */
        cp->cpu_dispatch_pri = pri;

        /*
         * There can be a memory synchronization race between disp_getbest()
         * and disp_ratify() vs cpu_resched() where cpu_resched() is trying
         * to preempt the current thread to run the enqueued thread while
         * disp_getbest() and disp_ratify() are changing the current thread
         * to the stolen thread.  This may lead to a situation where
         * cpu_resched() tries to preempt the wrong thread and the
         * stolen thread continues to run on the CPU which has been tagged
         * for preemption.
         * Later the clock thread gets enqueued but doesn't get to run on the
         * CPU causing the system to hang.
         *
         * To avoid this, grabbing and dropping the disp_lock (which does
         * a memory barrier) is needed to synchronize the execution of
         * cpu_resched() with disp_getbest() and disp_ratify() and
         * synchronize the memory read and written by cpu_resched(),
         * disp_getbest(), and disp_ratify() with each other.
         * (see CR#6482861 for more details).
         */
        disp_lock_enter_high(&cp->cpu_disp->disp_lock);
        disp_lock_exit_high(&cp->cpu_disp->disp_lock);

        ASSERT(pri == DISP_PRIO(tp));

        DTRACE_PROBE3(steal, kthread_t *, tp, cpu_t *, tcp, cpu_t *, cp);

        thread_onproc(tp, cp);                  /* set t_state to TS_ONPROC */

        /*
         * Return with spl high so that swtch() won't need to raise it.
         * The disp_lock was dropped by dispdeq().
         */

        return (tp);
}
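
/*
 * Sketch of the caller contract above (a hypothetical caller, not the
 * actual work-stealing loop elsewhere in this file): an idle CPU scanning
 * a remote dispatch queue must distinguish the three return values:
 *
 *	tp = disp_getbest(dp);
 *	if (tp == NULL)
 *		... nothing stealable here; try another CPU's queue ...
 *	else if (tp == T_DONTSTEAL)
 *		... unbound work exists but is too fresh; retry later,
 *		    e.g. once dp->disp_steal has passed ...
 *	else
 *		... switch to the stolen thread, already TS_ONPROC ...
 */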

/*
 * disp_bound_common() - common routine for higher level functions
 *	that check for bound threads under certain conditions.
 *	If 'threadlistsafe' is set then there is no need to acquire
 *	pidlock to stop the thread list from changing (eg, if
 *	disp_bound_* is called with cpus paused).
 */
static int
disp_bound_common(cpu_t *cp, int threadlistsafe, int flag)
{
        int             found = 0;
        kthread_t       *tp;

        ASSERT(flag);

        if (!threadlistsafe)
                mutex_enter(&pidlock);
        tp = curthread;         /* faster than allthreads */
        do {
                if (tp->t_state != TS_FREE) {
                        /*
                         * If an interrupt thread is busy, but the
                         * caller doesn't care (i.e. BOUND_INTR is off),
                         * then just ignore it and continue through.
                         */
                        if ((tp->t_flag & T_INTR_THREAD) &&
                            !(flag & BOUND_INTR))
                                continue;

                        /*
                         * Skip the idle thread for the CPU
                         * we're about to set offline.
                         */
                        if (tp == cp->cpu_idle_thread)
                                continue;

                        /*
                         * Skip the pause thread for the CPU
                         * we're about to set offline.
                         */
                        if (tp == cp->cpu_pause_thread)
                                continue;

                        if ((flag & BOUND_CPU) &&
                            (tp->t_bound_cpu == cp ||
                            tp->t_bind_cpu == cp->cpu_id ||
                            tp->t_weakbound_cpu == cp)) {
                                found = 1;
                                break;
                        }

                        if ((flag & BOUND_PARTITION) &&
                            (tp->t_cpupart == cp->cpu_part)) {
                                found = 1;
                                break;
                        }
                }
        } while ((tp = tp->t_next) != curthread && found == 0);
        if (!threadlistsafe)
                mutex_exit(&pidlock);
        return (found);
}

/*
 * disp_bound_threads - return nonzero if threads are bound to the processor.
 *	Called infrequently.  Keep this simple.
 *	Includes threads that are asleep or stopped but not onproc.
 */
int
disp_bound_threads(cpu_t *cp, int threadlistsafe)
{
        return (disp_bound_common(cp, threadlistsafe, BOUND_CPU));
}

/*
 * disp_bound_anythreads - return nonzero if _any_ threads are bound
 * to the given processor, including interrupt threads.
 */
int
disp_bound_anythreads(cpu_t *cp, int threadlistsafe)
{
        return (disp_bound_common(cp, threadlistsafe, BOUND_CPU | BOUND_INTR));
}

/*
 * disp_bound_partition - return nonzero if threads are bound to the same
 * partition as the processor.
 *	Called infrequently.  Keep this simple.
 *	Includes threads that are asleep or stopped but not onproc.
 */
int
disp_bound_partition(cpu_t *cp, int threadlistsafe)
{
        return (disp_bound_common(cp, threadlistsafe, BOUND_PARTITION));
}
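
/*
 * Typical use (illustrative; the actual callers live elsewhere in the
 * kernel): code taking a CPU offline can poll these predicates and refuse
 * to proceed while bindings remain, along the lines of
 *
 *	if (disp_bound_threads(cp, 0))
 *		return (EBUSY);		... threads still bound to cp ...
 */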

/*
 * disp_cpu_inactive - make a CPU inactive by moving all of its unbound
 * threads to other CPUs.
 */
void
disp_cpu_inactive(cpu_t *cp)
{
        kthread_t       *tp;
        disp_t          *dp = cp->cpu_disp;
        dispq_t         *dq;
        pri_t           pri;
        int             wasonq;

        disp_lock_enter(&dp->disp_lock);
        while ((pri = dp->disp_max_unbound_pri) != -1) {
                dq = &dp->disp_q[pri];
                tp = dq->dq_first;

                /*
                 * Skip over bound threads.
                 */
                while (tp != NULL && tp->t_bound_cpu != NULL) {
                        tp = tp->t_link;
                }

                if (tp == NULL) {
                        /* disp_max_unbound_pri must be inaccurate, so fix it */
                        disp_fix_unbound_pri(dp, pri);
                        continue;
                }

                wasonq = dispdeq(tp);           /* drops disp_lock */
                ASSERT(wasonq);
                ASSERT(tp->t_weakbound_cpu == NULL);

                setbackdq(tp);
                /*
                 * Called from cpu_offline:
                 *
                 * cp has already been removed from the list of active cpus
                 * and tp->t_cpu has been changed so there is no risk of
                 * tp ending up back on cp.
                 *
                 * Called from cpupart_move_cpu:
                 *
                 * The cpu has moved to a new cpupart.  Any threads that
                 * were on its dispatch queues before the move remain
                 * in the old partition and can't run in the new partition.
                 */
                ASSERT(tp->t_cpu != cp);
                thread_unlock(tp);

                disp_lock_enter(&dp->disp_lock);
        }
        disp_lock_exit(&dp->disp_lock);
}
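
/*
 * Note on termination: every pass through the loop above either corrects
 * a stale disp_max_unbound_pri or dequeues one unbound thread and
 * requeues it via setbackdq() on some other CPU (the ASSERT guarantees
 * tp->t_cpu != cp), so disp_max_unbound_pri must eventually fall to -1
 * and the loop exits with only bound threads left on cp.
 */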

/*
 * disp_lowpri_cpu - find CPU running the lowest priority thread.
 *	The hint passed in is used as a starting point so we don't favor
 *	CPU 0 or any other CPU.  The caller should pass in the most recently
 *	used CPU for the thread.
 *
 *	The lgroup and priority are used to determine the best CPU to run on
 *	in a NUMA machine.  The lgroup specifies which CPUs are closest while
 *	the thread priority will indicate whether the thread will actually run
 *	there.  To pick the best CPU, the CPUs inside and outside of the given
 *	lgroup which are running the lowest priority threads are found.  The
 *	remote CPU is chosen only if the thread will not run locally on a CPU
 *	within the lgroup, but will run on the remote CPU.  If the thread
 *	cannot immediately run on any CPU, the best local CPU will be chosen.
 *
 *	The lpl specified also identifies the cpu partition from which
 *	disp_lowpri_cpu should select a CPU.
 *
 *	curcpu is used to indicate that disp_lowpri_cpu is being called on
 *	behalf of the current thread. (curthread is looking for a new cpu)
 *	In this case, cpu_dispatch_pri for this thread's cpu should be
 *	ignored.
 *
 *	If a cpu is the target of an offline request then try to avoid it.
 *
 *	This function must be called at either high SPL, or with preemption
 *	disabled, so that the "hint" CPU cannot be removed from the online
 *	CPU list while we are traversing it.
 */
cpu_t *
disp_lowpri_cpu(cpu_t *hint, lpl_t *lpl, pri_t tpri, cpu_t *curcpu)
{
        cpu_t   *bestcpu;
        cpu_t   *besthomecpu;
        cpu_t   *cp, *cpstart;

        pri_t   bestpri;
        pri_t   cpupri;

        klgrpset_t      done;
        klgrpset_t      cur_set;

        lpl_t           *lpl_iter, *lpl_leaf;
        int             i;

        /*
         * Scan for a CPU currently running the lowest priority thread.
         * Cannot get cpu_lock here because it is adaptive.
         * We do not require lock on CPU list.
         */
        ASSERT(hint != NULL);
        ASSERT(lpl != NULL);
        ASSERT(lpl->lpl_ncpu > 0);
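
        /*
         * The search below widens outward through the lpl hierarchy: the
         * home leaf lpl first, then its parent's resource set, and so on
         * toward the root, skipping lgroups already visited.  On a simple
         * two-level NUMA machine, for instance, the first pass looks only
         * at the thread's home lgroup and the second pass (the root lpl)
         * covers all remaining lgroups.
         */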

        /*
         * First examine local CPUs.  Note that it's possible the hint CPU
         * passed in is remote to the specified home lgroup.  If our priority
         * isn't sufficient for us to run immediately at home, then examine
         * CPUs remote to our home lgroup.
         * We would like to give preference to CPUs closest to "home".
         * If we can't find a CPU where we'll run at a given level
         * of locality, we expand our search to include the next level.
         */
        bestcpu = besthomecpu = NULL;
        klgrpset_clear(done);
        /* start with lpl we were passed */

        lpl_iter = lpl;

        do {

                bestpri = SHRT_MAX;
                klgrpset_clear(cur_set);

                for (i = 0; i < lpl_iter->lpl_nrset; i++) {
                        lpl_leaf = lpl_iter->lpl_rset[i];
                        if (klgrpset_ismember(done, lpl_leaf->lpl_lgrpid))
                                continue;

                        klgrpset_add(cur_set, lpl_leaf->lpl_lgrpid);

                        if (hint->cpu_lpl == lpl_leaf)
                                cp = cpstart = hint;
                        else
                                cp = cpstart = lpl_leaf->lpl_cpus;

                        do {
                                if (cp == curcpu)
                                        cpupri = -1;
                                else if (cp == cpu_inmotion)
                                        cpupri = SHRT_MAX;
                                else
                                        cpupri = cp->cpu_dispatch_pri;
                                if (cp->cpu_disp->disp_maxrunpri > cpupri)
                                        cpupri = cp->cpu_disp->disp_maxrunpri;
                                if (cp->cpu_chosen_level > cpupri)
                                        cpupri = cp->cpu_chosen_level;
                                if (cpupri < bestpri) {
                                        if (CPU_IDLING(cpupri)) {
                                                ASSERT((cp->cpu_flags &
                                                    CPU_QUIESCED) == 0);
                                                return (cp);
                                        }
                                        bestcpu = cp;
                                        bestpri = cpupri;
                                }
                        } while ((cp = cp->cpu_next_lpl) != cpstart);
                }

                if (bestcpu && (tpri > bestpri)) {
                        ASSERT((bestcpu->cpu_flags & CPU_QUIESCED) == 0);
                        return (bestcpu);
                }
                if (besthomecpu == NULL)
                        besthomecpu = bestcpu;
                /*
                 * Add the lgrps we just considered to the "done" set
                 */
                klgrpset_or(done, cur_set);

        } while ((lpl_iter = lpl_iter->lpl_parent) != NULL);
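
        /*
         * The scan above scores curcpu as -1 so the current CPU always
         * looks preemptable to its own thread, and scores cpu_inmotion
         * (the target of an offline request) as SHRT_MAX so it is avoided.
         * A CPU found idling is returned on the spot, since nothing can
         * beat it.
         */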

        /*
         * The specified priority isn't high enough to run immediately
         * anywhere, so just return the best CPU from the home lgroup.
         */
        ASSERT((besthomecpu->cpu_flags & CPU_QUIESCED) == 0);
        return (besthomecpu);
}

/*
 * This routine provides the generic idle cpu function for all processors.
 * If a processor has some specific code to execute when idle (say, to stop
 * the pipeline and save power) then that routine should be defined in the
 * processor's specific code (module_xx.c) and the global variable idle_cpu
 * set to that function.
 */
static void
generic_idle_cpu(void)
{
}

/*ARGSUSED*/
static void
generic_enq_thread(cpu_t *cpu, int bound)
{
}
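
/*
 * Sketch of how a platform module is expected to override these hooks
 * (hypothetical names; the real assignments live in the platform-specific
 * files, such as the module_xx.c mentioned above):
 *
 *	static void
 *	myplat_idle_cpu(void)
 *	{
 *		... e.g. halt the pipeline until the next interrupt ...
 *	}
 *
 *	static void
 *	myplat_enq_thread(cpu_t *cp, int bound)
 *	{
 *		... e.g. wake cp if it is halted ...
 *	}
 *
 *	void
 *	myplat_init(void)
 *	{
 *		idle_cpu = myplat_idle_cpu;
 *		disp_enq_thread = myplat_enq_thread;
 *	}
 */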