1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2012 by Delphix. All rights reserved. 24 * Copyright 2019 Joyent, Inc. 25 */ 26 27 /* 28 * Architecture-independent CPU control functions. 29 */ 30 31 #include <sys/types.h> 32 #include <sys/param.h> 33 #include <sys/var.h> 34 #include <sys/thread.h> 35 #include <sys/cpuvar.h> 36 #include <sys/cpu_event.h> 37 #include <sys/kstat.h> 38 #include <sys/uadmin.h> 39 #include <sys/systm.h> 40 #include <sys/errno.h> 41 #include <sys/cmn_err.h> 42 #include <sys/procset.h> 43 #include <sys/processor.h> 44 #include <sys/debug.h> 45 #include <sys/cpupart.h> 46 #include <sys/lgrp.h> 47 #include <sys/pset.h> 48 #include <sys/pghw.h> 49 #include <sys/kmem.h> 50 #include <sys/kmem_impl.h> /* to set per-cpu kmem_cache offset */ 51 #include <sys/atomic.h> 52 #include <sys/callb.h> 53 #include <sys/vtrace.h> 54 #include <sys/cyclic.h> 55 #include <sys/bitmap.h> 56 #include <sys/nvpair.h> 57 #include <sys/pool_pset.h> 58 #include <sys/msacct.h> 59 #include <sys/time.h> 60 #include <sys/archsystm.h> 61 #include <sys/sdt.h> 62 #if defined(__x86) || defined(__amd64) 63 #include <sys/x86_archext.h> 64 #endif 65 #include <sys/callo.h> 66 67 extern int mp_cpu_start(cpu_t *); 68 extern int mp_cpu_stop(cpu_t *); 69 extern int mp_cpu_poweron(cpu_t *); 70 extern int mp_cpu_poweroff(cpu_t *); 71 extern int mp_cpu_configure(int); 72 extern int mp_cpu_unconfigure(int); 73 extern void mp_cpu_faulted_enter(cpu_t *); 74 extern void mp_cpu_faulted_exit(cpu_t *); 75 76 extern int cmp_cpu_to_chip(processorid_t cpuid); 77 #ifdef __sparcv9 78 extern char *cpu_fru_fmri(cpu_t *cp); 79 #endif 80 81 static void cpu_add_active_internal(cpu_t *cp); 82 static void cpu_remove_active(cpu_t *cp); 83 static void cpu_info_kstat_create(cpu_t *cp); 84 static void cpu_info_kstat_destroy(cpu_t *cp); 85 static void cpu_stats_kstat_create(cpu_t *cp); 86 static void cpu_stats_kstat_destroy(cpu_t *cp); 87 88 static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw); 89 static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw); 90 static int cpu_stat_ks_update(kstat_t *ksp, int rw); 91 static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t); 92 93 /* 94 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active, 95 * max_cpu_seqid_ever, and dispatch queue reallocations. The lock ordering with 96 * respect to related locks is: 97 * 98 * cpu_lock --> thread_free_lock ---> p_lock ---> thread_lock() 99 * 100 * Warning: Certain sections of code do not use the cpu_lock when 101 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()). 
Since
 * all cpus are paused during modifications to this list, a solution
 * to protect the list is to either disable kernel preemption while
 * walking the list, *or* recheck the cpu_next pointer at each
 * iteration in the loop.  Note that in no case can any cached
 * copies of the cpu pointers be kept, as they may become invalid.
 */
kmutex_t	cpu_lock;
cpu_t		*cpu_list;		/* list of all CPUs */
cpu_t		*clock_cpu_list;	/* used by clock to walk CPUs */
cpu_t		*cpu_active;		/* list of active CPUs */
cpuset_t	cpu_active_set;		/* cached set of active CPUs */
static cpuset_t	cpu_available;		/* set of available CPUs */
cpuset_t	cpu_seqid_inuse;	/* which cpu_seqids are in use */

cpu_t		**cpu_seq;		/* ptrs to CPUs, indexed by seq_id */

/*
 * max_ncpus keeps the max cpus the system can have.  Initially
 * it's NCPU, but since most archs scan the devtree for cpus
 * fairly early on during boot, the real max can be known before
 * ncpus is set (useful for early NCPU based allocations).
 */
int max_ncpus = NCPU;
/*
 * platforms that set max_ncpus to the maximum number of cpus that can be
 * dynamically added will set boot_max_ncpus to the number of cpus found
 * at device tree scan time during boot.
 */
int boot_max_ncpus = -1;
int boot_ncpus = -1;
/*
 * Maximum possible CPU id.  This can never be >= NCPU since NCPU is
 * used to size arrays that are indexed by CPU id.
 */
processorid_t max_cpuid = NCPU - 1;

/*
 * The maximum cpu_seqid ever given out.  This number can only grow and never
 * shrink.  It can be used to optimize NCPU loops to avoid going through
 * CPUs which were never on-line.
 */
processorid_t max_cpu_seqid_ever = 0;

int ncpus = 1;
int ncpus_online = 1;

/*
 * CPU that we're trying to offline.  Protected by cpu_lock.
 */
cpu_t *cpu_inmotion;

/*
 * Can be raised to suppress further weakbindings, which are instead
 * satisfied by disabling preemption.  Must be raised/lowered under cpu_lock,
 * while individual thread weakbinding synchronization is done under thread
 * lock.
 */
int weakbindingbarrier;

/*
 * Variables used in pause_cpus().
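 * safe_list[] holds the per-CPU handshake state (PAUSE_IDLE, PAUSE_READY,
 * PAUSE_WAIT, PAUSE_DIE, PAUSE_DEAD) and cpu_pause_info describes the
 * pause request itself; see pause_cpus() and cpu_pause() below.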
163 */ 164 static volatile char safe_list[NCPU]; 165 166 static struct _cpu_pause_info { 167 int cp_spl; /* spl saved in pause_cpus() */ 168 volatile int cp_go; /* Go signal sent after all ready */ 169 int cp_count; /* # of CPUs to pause */ 170 ksema_t cp_sem; /* synch pause_cpus & cpu_pause */ 171 kthread_id_t cp_paused; 172 void *(*cp_func)(void *); 173 } cpu_pause_info; 174 175 static kmutex_t pause_free_mutex; 176 static kcondvar_t pause_free_cv; 177 178 179 static struct cpu_sys_stats_ks_data { 180 kstat_named_t cpu_ticks_idle; 181 kstat_named_t cpu_ticks_user; 182 kstat_named_t cpu_ticks_kernel; 183 kstat_named_t cpu_ticks_wait; 184 kstat_named_t cpu_nsec_idle; 185 kstat_named_t cpu_nsec_user; 186 kstat_named_t cpu_nsec_kernel; 187 kstat_named_t cpu_nsec_dtrace; 188 kstat_named_t cpu_nsec_intr; 189 kstat_named_t cpu_load_intr; 190 kstat_named_t wait_ticks_io; 191 kstat_named_t dtrace_probes; 192 kstat_named_t bread; 193 kstat_named_t bwrite; 194 kstat_named_t lread; 195 kstat_named_t lwrite; 196 kstat_named_t phread; 197 kstat_named_t phwrite; 198 kstat_named_t pswitch; 199 kstat_named_t trap; 200 kstat_named_t intr; 201 kstat_named_t syscall; 202 kstat_named_t sysread; 203 kstat_named_t syswrite; 204 kstat_named_t sysfork; 205 kstat_named_t sysvfork; 206 kstat_named_t sysexec; 207 kstat_named_t readch; 208 kstat_named_t writech; 209 kstat_named_t rcvint; 210 kstat_named_t xmtint; 211 kstat_named_t mdmint; 212 kstat_named_t rawch; 213 kstat_named_t canch; 214 kstat_named_t outch; 215 kstat_named_t msg; 216 kstat_named_t sema; 217 kstat_named_t namei; 218 kstat_named_t ufsiget; 219 kstat_named_t ufsdirblk; 220 kstat_named_t ufsipage; 221 kstat_named_t ufsinopage; 222 kstat_named_t procovf; 223 kstat_named_t intrthread; 224 kstat_named_t intrblk; 225 kstat_named_t intrunpin; 226 kstat_named_t idlethread; 227 kstat_named_t inv_swtch; 228 kstat_named_t nthreads; 229 kstat_named_t cpumigrate; 230 kstat_named_t xcalls; 231 kstat_named_t mutex_adenters; 232 kstat_named_t rw_rdfails; 233 kstat_named_t rw_wrfails; 234 kstat_named_t modload; 235 kstat_named_t modunload; 236 kstat_named_t bawrite; 237 kstat_named_t iowait; 238 } cpu_sys_stats_ks_data_template = { 239 { "cpu_ticks_idle", KSTAT_DATA_UINT64 }, 240 { "cpu_ticks_user", KSTAT_DATA_UINT64 }, 241 { "cpu_ticks_kernel", KSTAT_DATA_UINT64 }, 242 { "cpu_ticks_wait", KSTAT_DATA_UINT64 }, 243 { "cpu_nsec_idle", KSTAT_DATA_UINT64 }, 244 { "cpu_nsec_user", KSTAT_DATA_UINT64 }, 245 { "cpu_nsec_kernel", KSTAT_DATA_UINT64 }, 246 { "cpu_nsec_dtrace", KSTAT_DATA_UINT64 }, 247 { "cpu_nsec_intr", KSTAT_DATA_UINT64 }, 248 { "cpu_load_intr", KSTAT_DATA_UINT64 }, 249 { "wait_ticks_io", KSTAT_DATA_UINT64 }, 250 { "dtrace_probes", KSTAT_DATA_UINT64 }, 251 { "bread", KSTAT_DATA_UINT64 }, 252 { "bwrite", KSTAT_DATA_UINT64 }, 253 { "lread", KSTAT_DATA_UINT64 }, 254 { "lwrite", KSTAT_DATA_UINT64 }, 255 { "phread", KSTAT_DATA_UINT64 }, 256 { "phwrite", KSTAT_DATA_UINT64 }, 257 { "pswitch", KSTAT_DATA_UINT64 }, 258 { "trap", KSTAT_DATA_UINT64 }, 259 { "intr", KSTAT_DATA_UINT64 }, 260 { "syscall", KSTAT_DATA_UINT64 }, 261 { "sysread", KSTAT_DATA_UINT64 }, 262 { "syswrite", KSTAT_DATA_UINT64 }, 263 { "sysfork", KSTAT_DATA_UINT64 }, 264 { "sysvfork", KSTAT_DATA_UINT64 }, 265 { "sysexec", KSTAT_DATA_UINT64 }, 266 { "readch", KSTAT_DATA_UINT64 }, 267 { "writech", KSTAT_DATA_UINT64 }, 268 { "rcvint", KSTAT_DATA_UINT64 }, 269 { "xmtint", KSTAT_DATA_UINT64 }, 270 { "mdmint", KSTAT_DATA_UINT64 }, 271 { "rawch", KSTAT_DATA_UINT64 }, 272 { "canch", KSTAT_DATA_UINT64 }, 
273 { "outch", KSTAT_DATA_UINT64 }, 274 { "msg", KSTAT_DATA_UINT64 }, 275 { "sema", KSTAT_DATA_UINT64 }, 276 { "namei", KSTAT_DATA_UINT64 }, 277 { "ufsiget", KSTAT_DATA_UINT64 }, 278 { "ufsdirblk", KSTAT_DATA_UINT64 }, 279 { "ufsipage", KSTAT_DATA_UINT64 }, 280 { "ufsinopage", KSTAT_DATA_UINT64 }, 281 { "procovf", KSTAT_DATA_UINT64 }, 282 { "intrthread", KSTAT_DATA_UINT64 }, 283 { "intrblk", KSTAT_DATA_UINT64 }, 284 { "intrunpin", KSTAT_DATA_UINT64 }, 285 { "idlethread", KSTAT_DATA_UINT64 }, 286 { "inv_swtch", KSTAT_DATA_UINT64 }, 287 { "nthreads", KSTAT_DATA_UINT64 }, 288 { "cpumigrate", KSTAT_DATA_UINT64 }, 289 { "xcalls", KSTAT_DATA_UINT64 }, 290 { "mutex_adenters", KSTAT_DATA_UINT64 }, 291 { "rw_rdfails", KSTAT_DATA_UINT64 }, 292 { "rw_wrfails", KSTAT_DATA_UINT64 }, 293 { "modload", KSTAT_DATA_UINT64 }, 294 { "modunload", KSTAT_DATA_UINT64 }, 295 { "bawrite", KSTAT_DATA_UINT64 }, 296 { "iowait", KSTAT_DATA_UINT64 }, 297 }; 298 299 static struct cpu_vm_stats_ks_data { 300 kstat_named_t pgrec; 301 kstat_named_t pgfrec; 302 kstat_named_t pgin; 303 kstat_named_t pgpgin; 304 kstat_named_t pgout; 305 kstat_named_t pgpgout; 306 kstat_named_t swapin; 307 kstat_named_t pgswapin; 308 kstat_named_t swapout; 309 kstat_named_t pgswapout; 310 kstat_named_t zfod; 311 kstat_named_t dfree; 312 kstat_named_t scan; 313 kstat_named_t rev; 314 kstat_named_t hat_fault; 315 kstat_named_t as_fault; 316 kstat_named_t maj_fault; 317 kstat_named_t cow_fault; 318 kstat_named_t prot_fault; 319 kstat_named_t softlock; 320 kstat_named_t kernel_asflt; 321 kstat_named_t pgrrun; 322 kstat_named_t execpgin; 323 kstat_named_t execpgout; 324 kstat_named_t execfree; 325 kstat_named_t anonpgin; 326 kstat_named_t anonpgout; 327 kstat_named_t anonfree; 328 kstat_named_t fspgin; 329 kstat_named_t fspgout; 330 kstat_named_t fsfree; 331 } cpu_vm_stats_ks_data_template = { 332 { "pgrec", KSTAT_DATA_UINT64 }, 333 { "pgfrec", KSTAT_DATA_UINT64 }, 334 { "pgin", KSTAT_DATA_UINT64 }, 335 { "pgpgin", KSTAT_DATA_UINT64 }, 336 { "pgout", KSTAT_DATA_UINT64 }, 337 { "pgpgout", KSTAT_DATA_UINT64 }, 338 { "swapin", KSTAT_DATA_UINT64 }, 339 { "pgswapin", KSTAT_DATA_UINT64 }, 340 { "swapout", KSTAT_DATA_UINT64 }, 341 { "pgswapout", KSTAT_DATA_UINT64 }, 342 { "zfod", KSTAT_DATA_UINT64 }, 343 { "dfree", KSTAT_DATA_UINT64 }, 344 { "scan", KSTAT_DATA_UINT64 }, 345 { "rev", KSTAT_DATA_UINT64 }, 346 { "hat_fault", KSTAT_DATA_UINT64 }, 347 { "as_fault", KSTAT_DATA_UINT64 }, 348 { "maj_fault", KSTAT_DATA_UINT64 }, 349 { "cow_fault", KSTAT_DATA_UINT64 }, 350 { "prot_fault", KSTAT_DATA_UINT64 }, 351 { "softlock", KSTAT_DATA_UINT64 }, 352 { "kernel_asflt", KSTAT_DATA_UINT64 }, 353 { "pgrrun", KSTAT_DATA_UINT64 }, 354 { "execpgin", KSTAT_DATA_UINT64 }, 355 { "execpgout", KSTAT_DATA_UINT64 }, 356 { "execfree", KSTAT_DATA_UINT64 }, 357 { "anonpgin", KSTAT_DATA_UINT64 }, 358 { "anonpgout", KSTAT_DATA_UINT64 }, 359 { "anonfree", KSTAT_DATA_UINT64 }, 360 { "fspgin", KSTAT_DATA_UINT64 }, 361 { "fspgout", KSTAT_DATA_UINT64 }, 362 { "fsfree", KSTAT_DATA_UINT64 }, 363 }; 364 365 /* 366 * Force the specified thread to migrate to the appropriate processor. 367 * Called with thread lock held, returns with it dropped. 
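 * If tp is curthread we simply set it running and switch off; otherwise an
 * ONPROC thread is asked to surrender its CPU, and a TS_RUN thread is
 * dequeued and requeued so the dispatcher can place it appropriately.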
368 */ 369 static void 370 force_thread_migrate(kthread_id_t tp) 371 { 372 ASSERT(THREAD_LOCK_HELD(tp)); 373 if (tp == curthread) { 374 THREAD_TRANSITION(tp); 375 CL_SETRUN(tp); 376 thread_unlock_nopreempt(tp); 377 swtch(); 378 } else { 379 if (tp->t_state == TS_ONPROC) { 380 cpu_surrender(tp); 381 } else if (tp->t_state == TS_RUN) { 382 (void) dispdeq(tp); 383 setbackdq(tp); 384 } 385 thread_unlock(tp); 386 } 387 } 388 389 /* 390 * Set affinity for a specified CPU. 391 * A reference count is incremented and the affinity is held until the 392 * reference count is decremented to zero by thread_affinity_clear(). 393 * This is so regions of code requiring affinity can be nested. 394 * Caller needs to ensure that cpu_id remains valid, which can be 395 * done by holding cpu_lock across this call, unless the caller 396 * specifies CPU_CURRENT in which case the cpu_lock will be acquired 397 * by thread_affinity_set and CPU->cpu_id will be the target CPU. 398 */ 399 void 400 thread_affinity_set(kthread_id_t t, int cpu_id) 401 { 402 cpu_t *cp; 403 int c; 404 405 ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL)); 406 407 if ((c = cpu_id) == CPU_CURRENT) { 408 mutex_enter(&cpu_lock); 409 cpu_id = CPU->cpu_id; 410 } 411 /* 412 * We should be asserting that cpu_lock is held here, but 413 * the NCA code doesn't acquire it. The following assert 414 * should be uncommented when the NCA code is fixed. 415 * 416 * ASSERT(MUTEX_HELD(&cpu_lock)); 417 */ 418 ASSERT((cpu_id >= 0) && (cpu_id < NCPU)); 419 cp = cpu[cpu_id]; 420 ASSERT(cp != NULL); /* user must provide a good cpu_id */ 421 /* 422 * If there is already a hard affinity requested, and this affinity 423 * conflicts with that, panic. 424 */ 425 thread_lock(t); 426 if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) { 427 panic("affinity_set: setting %p but already bound to %p", 428 (void *)cp, (void *)t->t_bound_cpu); 429 } 430 t->t_affinitycnt++; 431 t->t_bound_cpu = cp; 432 433 /* 434 * Make sure we're running on the right CPU. 435 */ 436 if (cp != t->t_cpu || t != curthread) { 437 force_thread_migrate(t); /* drops thread lock */ 438 } else { 439 thread_unlock(t); 440 } 441 442 if (c == CPU_CURRENT) 443 mutex_exit(&cpu_lock); 444 } 445 446 /* 447 * Wrapper for backward compatibility. 448 */ 449 void 450 affinity_set(int cpu_id) 451 { 452 thread_affinity_set(curthread, cpu_id); 453 } 454 455 /* 456 * Decrement the affinity reservation count and if it becomes zero, 457 * clear the CPU affinity for the current thread, or set it to the user's 458 * software binding request. 459 */ 460 void 461 thread_affinity_clear(kthread_id_t t) 462 { 463 register processorid_t binding; 464 465 thread_lock(t); 466 if (--t->t_affinitycnt == 0) { 467 if ((binding = t->t_bind_cpu) == PBIND_NONE) { 468 /* 469 * Adjust disp_max_unbound_pri if necessary. 470 */ 471 disp_adjust_unbound_pri(t); 472 t->t_bound_cpu = NULL; 473 if (t->t_cpu->cpu_part != t->t_cpupart) { 474 force_thread_migrate(t); 475 return; 476 } 477 } else { 478 t->t_bound_cpu = cpu[binding]; 479 /* 480 * Make sure the thread is running on the bound CPU. 481 */ 482 if (t->t_cpu != t->t_bound_cpu) { 483 force_thread_migrate(t); 484 return; /* already dropped lock */ 485 } 486 } 487 } 488 thread_unlock(t); 489 } 490 491 /* 492 * Wrapper for backward compatibility. 493 */ 494 void 495 affinity_clear(void) 496 { 497 thread_affinity_clear(curthread); 498 } 499 500 /* 501 * Weak cpu affinity. Bind to the "current" cpu for short periods 502 * of time during which the thread must not block (but may be preempted). 
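 * A typical usage pattern, as an illustrative sketch, is simply:
 *
 *	thread_nomigrate();
 *	... access cpu-local state; no blocking here ...
 *	thread_allowmigrate();
 *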
503 * Use this instead of kpreempt_disable() when it is only "no migration" 504 * rather than "no preemption" semantics that are required - disabling 505 * preemption holds higher priority threads off of cpu and if the 506 * operation that is protected is more than momentary this is not good 507 * for realtime etc. 508 * 509 * Weakly bound threads will not prevent a cpu from being offlined - 510 * we'll only run them on the cpu to which they are weakly bound but 511 * (because they do not block) we'll always be able to move them on to 512 * another cpu at offline time if we give them just a short moment to 513 * run during which they will unbind. To give a cpu a chance of offlining, 514 * however, we require a barrier to weak bindings that may be raised for a 515 * given cpu (offline/move code may set this and then wait a short time for 516 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier. 517 * 518 * There are few restrictions on the calling context of thread_nomigrate. 519 * The caller must not hold the thread lock. Calls may be nested. 520 * 521 * After weakbinding a thread must not perform actions that may block. 522 * In particular it must not call thread_affinity_set; calling that when 523 * already weakbound is nonsensical anyway. 524 * 525 * If curthread is prevented from migrating for other reasons 526 * (kernel preemption disabled; high pil; strongly bound; interrupt thread) 527 * then the weak binding will succeed even if this cpu is the target of an 528 * offline/move request. 529 */ 530 void 531 thread_nomigrate(void) 532 { 533 cpu_t *cp; 534 kthread_id_t t = curthread; 535 536 again: 537 kpreempt_disable(); 538 cp = CPU; 539 540 /* 541 * A highlevel interrupt must not modify t_nomigrate or 542 * t_weakbound_cpu of the thread it has interrupted. A lowlevel 543 * interrupt thread cannot migrate and we can avoid the 544 * thread_lock call below by short-circuiting here. In either 545 * case we can just return since no migration is possible and 546 * the condition will persist (ie, when we test for these again 547 * in thread_allowmigrate they can't have changed). Migration 548 * is also impossible if we're at or above DISP_LEVEL pil. 549 */ 550 if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD || 551 getpil() >= DISP_LEVEL) { 552 kpreempt_enable(); 553 return; 554 } 555 556 /* 557 * We must be consistent with existing weak bindings. Since we 558 * may be interrupted between the increment of t_nomigrate and 559 * the store to t_weakbound_cpu below we cannot assume that 560 * t_weakbound_cpu will be set if t_nomigrate is. Note that we 561 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not 562 * always the case. 563 */ 564 if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) { 565 if (!panicstr) 566 panic("thread_nomigrate: binding to %p but already " 567 "bound to %p", (void *)cp, 568 (void *)t->t_weakbound_cpu); 569 } 570 571 /* 572 * At this point we have preemption disabled and we don't yet hold 573 * the thread lock. So it's possible that somebody else could 574 * set t_bind_cpu here and not be able to force us across to the 575 * new cpu (since we have preemption disabled). 576 */ 577 thread_lock(curthread); 578 579 /* 580 * If further weak bindings are being (temporarily) suppressed then 581 * we'll settle for disabling kernel preemption (which assures 582 * no migration provided the thread does not block which it is 583 * not allowed to if using thread_nomigrate). 
We must remember 584 * this disposition so we can take appropriate action in 585 * thread_allowmigrate. If this is a nested call and the 586 * thread is already weakbound then fall through as normal. 587 * We remember the decision to settle for kpreempt_disable through 588 * negative nesting counting in t_nomigrate. Once a thread has had one 589 * weakbinding request satisfied in this way any further (nested) 590 * requests will continue to be satisfied in the same way, 591 * even if weak bindings have recommenced. 592 */ 593 if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) { 594 --t->t_nomigrate; 595 thread_unlock(curthread); 596 return; /* with kpreempt_disable still active */ 597 } 598 599 /* 600 * We hold thread_lock so t_bind_cpu cannot change. We could, 601 * however, be running on a different cpu to which we are t_bound_cpu 602 * to (as explained above). If we grant the weak binding request 603 * in that case then the dispatcher must favour our weak binding 604 * over our strong (in which case, just as when preemption is 605 * disabled, we can continue to run on a cpu other than the one to 606 * which we are strongbound; the difference in this case is that 607 * this thread can be preempted and so can appear on the dispatch 608 * queues of a cpu other than the one it is strongbound to). 609 * 610 * If the cpu we are running on does not appear to be a current 611 * offline target (we check cpu_inmotion to determine this - since 612 * we don't hold cpu_lock we may not see a recent store to that, 613 * so it's possible that we at times can grant a weak binding to a 614 * cpu that is an offline target, but that one request will not 615 * prevent the offline from succeeding) then we will always grant 616 * the weak binding request. This includes the case above where 617 * we grant a weakbinding not commensurate with our strong binding. 618 * 619 * If our cpu does appear to be an offline target then we're inclined 620 * not to grant the weakbinding request just yet - we'd prefer to 621 * migrate to another cpu and grant the request there. The 622 * exceptions are those cases where going through preemption code 623 * will not result in us changing cpu: 624 * 625 * . interrupts have already bypassed this case (see above) 626 * . we are already weakbound to this cpu (dispatcher code will 627 * always return us to the weakbound cpu) 628 * . preemption was disabled even before we disabled it above 629 * . we are strongbound to this cpu (if we're strongbound to 630 * another and not yet running there the trip through the 631 * dispatcher will move us to the strongbound cpu and we 632 * will grant the weak binding there) 633 */ 634 if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 || 635 t->t_bound_cpu == cp) { 636 /* 637 * Don't be tempted to store to t_weakbound_cpu only on 638 * the first nested bind request - if we're interrupted 639 * after the increment of t_nomigrate and before the 640 * store to t_weakbound_cpu and the interrupt calls 641 * thread_nomigrate then the assertion in thread_allowmigrate 642 * would fail. 643 */ 644 t->t_nomigrate++; 645 t->t_weakbound_cpu = cp; 646 membar_producer(); 647 thread_unlock(curthread); 648 /* 649 * Now that we have dropped the thread_lock another thread 650 * can set our t_weakbound_cpu, and will try to migrate us 651 * to the strongbound cpu (which will not be prevented by 652 * preemption being disabled since we're about to enable 653 * preemption). 
We have granted the weakbinding to the current
		 * cpu, so again we are in the position that it is possible
		 * that our weak and strong bindings differ.  Again this
		 * is catered for by dispatcher code which will favour our
		 * weak binding.
		 */
		kpreempt_enable();
	} else {
		/*
		 * Move to another cpu before granting the request by
		 * forcing this thread through preemption code.  When we
		 * get to set{front,back}dq called from CL_PREEMPT()
		 * cpu_choose() will be used to select a cpu to queue
		 * us on - that will see cpu_inmotion and take
		 * steps to avoid returning us to this cpu.
		 */
		cp->cpu_kprunrun = 1;
		thread_unlock(curthread);
		kpreempt_enable();	/* will call preempt() */
		goto again;
	}
}

void
thread_allowmigrate(void)
{
	kthread_id_t t = curthread;

	ASSERT(t->t_weakbound_cpu == CPU ||
	    (t->t_nomigrate < 0 && t->t_preempt > 0) ||
	    CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
	    getpil() >= DISP_LEVEL);

	if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
	    getpil() >= DISP_LEVEL)
		return;

	if (t->t_nomigrate < 0) {
		/*
		 * This thread was granted "weak binding" in the
		 * stronger form of kernel preemption disabling.
		 * Undo a level of nesting for both t_nomigrate
		 * and t_preempt.
		 */
		++t->t_nomigrate;
		kpreempt_enable();
	} else if (--t->t_nomigrate == 0) {
		/*
		 * Time to drop the weak binding.  We need to cater
		 * for the case where we're weakbound to a different
		 * cpu than that to which we're strongbound (a very
		 * temporary arrangement that must only persist until
		 * weak binding drops).  We don't acquire thread_lock
		 * here so even as this code executes t_bound_cpu
		 * may be changing.  So we disable preemption and
		 * a) in the case that t_bound_cpu changes while we
		 * have preemption disabled kprunrun will be set
		 * asynchronously, and b) if before disabling
		 * preemption we were already on a different cpu to
		 * our t_bound_cpu then we set kprunrun ourselves
		 * to force a trip through the dispatcher when
		 * preemption is enabled.
		 */
		kpreempt_disable();
		if (t->t_bound_cpu &&
		    t->t_weakbound_cpu != t->t_bound_cpu)
			CPU->cpu_kprunrun = 1;
		t->t_weakbound_cpu = NULL;
		membar_producer();
		kpreempt_enable();
	}
}

/*
 * weakbinding_stop can be used to temporarily cause weakbindings made
 * with thread_nomigrate to be satisfied through the stronger action of
 * kpreempt_disable.  weakbinding_start recommences normal weakbinding.
 */

void
weakbinding_stop(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 1;
	membar_producer();	/* make visible before subsequent thread_lock */
}

void
weakbinding_start(void)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	weakbindingbarrier = 0;
}

void
null_xcall(void)
{
}

/*
 * This routine is called to place the CPUs in a safe place so that
 * one of them can be taken off line or placed on line.  What we are
 * trying to do here is prevent a thread from traversing the list
 * of active CPUs while we are changing it or from getting placed on
 * the run queue of a CPU that has just gone off line.  We do this by
 * creating a thread with the highest possible prio for each CPU and
 * having it call this routine.
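 * Schematically, a caller that needs all other CPUs held in this safe
 * place does (an illustrative sketch; see also the restrictions noted
 * above pause_cpus() below):
 *
 *	mutex_enter(&cpu_lock);
 *	pause_cpus(NULL, NULL);
 *	... update cpu_list / cpu_active; no blocking, no adaptive locks ...
 *	start_cpus();
 *	mutex_exit(&cpu_lock);
 *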
The advantage of this method is that 760 * we can eliminate all checks for CPU_ACTIVE in the disp routines. 761 * This makes disp faster at the expense of making p_online() slower 762 * which is a good trade off. 763 */ 764 static void 765 cpu_pause(int index) 766 { 767 int s; 768 struct _cpu_pause_info *cpi = &cpu_pause_info; 769 volatile char *safe = &safe_list[index]; 770 long lindex = index; 771 772 ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE)); 773 774 while (*safe != PAUSE_DIE) { 775 *safe = PAUSE_READY; 776 membar_enter(); /* make sure stores are flushed */ 777 sema_v(&cpi->cp_sem); /* signal requesting thread */ 778 779 /* 780 * Wait here until all pause threads are running. That 781 * indicates that it's safe to do the spl. Until 782 * cpu_pause_info.cp_go is set, we don't want to spl 783 * because that might block clock interrupts needed 784 * to preempt threads on other CPUs. 785 */ 786 while (cpi->cp_go == 0) 787 ; 788 /* 789 * Even though we are at the highest disp prio, we need 790 * to block out all interrupts below LOCK_LEVEL so that 791 * an intr doesn't come in, wake up a thread, and call 792 * setbackdq/setfrontdq. 793 */ 794 s = splhigh(); 795 /* 796 * if cp_func has been set then call it using index as the 797 * argument, currently only used by cpr_suspend_cpus(). 798 * This function is used as the code to execute on the 799 * "paused" cpu's when a machine comes out of a sleep state 800 * and CPU's were powered off. (could also be used for 801 * hotplugging CPU's). 802 */ 803 if (cpi->cp_func != NULL) 804 (*cpi->cp_func)((void *)lindex); 805 806 mach_cpu_pause(safe); 807 808 splx(s); 809 /* 810 * Waiting is at an end. Switch out of cpu_pause 811 * loop and resume useful work. 812 */ 813 swtch(); 814 } 815 816 mutex_enter(&pause_free_mutex); 817 *safe = PAUSE_DEAD; 818 cv_broadcast(&pause_free_cv); 819 mutex_exit(&pause_free_mutex); 820 } 821 822 /* 823 * Allow the cpus to start running again. 824 */ 825 void 826 start_cpus() 827 { 828 int i; 829 830 ASSERT(MUTEX_HELD(&cpu_lock)); 831 ASSERT(cpu_pause_info.cp_paused); 832 cpu_pause_info.cp_paused = NULL; 833 for (i = 0; i < NCPU; i++) 834 safe_list[i] = PAUSE_IDLE; 835 membar_enter(); /* make sure stores are flushed */ 836 affinity_clear(); 837 splx(cpu_pause_info.cp_spl); 838 kpreempt_enable(); 839 } 840 841 /* 842 * Allocate a pause thread for a CPU. 843 */ 844 static void 845 cpu_pause_alloc(cpu_t *cp) 846 { 847 kthread_id_t t; 848 long cpun = cp->cpu_id; 849 850 /* 851 * Note, v.v_nglobpris will not change value as long as I hold 852 * cpu_lock. 853 */ 854 t = thread_create(NULL, 0, cpu_pause, (void *)cpun, 855 0, &p0, TS_STOPPED, v.v_nglobpris - 1); 856 thread_lock(t); 857 t->t_bound_cpu = cp; 858 t->t_disp_queue = cp->cpu_disp; 859 t->t_affinitycnt = 1; 860 t->t_preempt = 1; 861 thread_unlock(t); 862 cp->cpu_pause_thread = t; 863 /* 864 * Registering a thread in the callback table is usually done 865 * in the initialization code of the thread. In this 866 * case, we do it right after thread creation because the 867 * thread itself may never run, and we need to register the 868 * fact that it is safe for cpr suspend. 869 */ 870 CALLB_CPR_INIT_SAFE(t, "cpu_pause"); 871 } 872 873 /* 874 * Free a pause thread for a CPU. 875 */ 876 static void 877 cpu_pause_free(cpu_t *cp) 878 { 879 kthread_id_t t; 880 int cpun = cp->cpu_id; 881 882 ASSERT(MUTEX_HELD(&cpu_lock)); 883 /* 884 * We have to get the thread and tell it to die. 
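	 * We do that by setting safe_list[cpun] to PAUSE_DIE and dispatching
	 * the thread; cpu_pause() sees PAUSE_DIE, marks the slot PAUSE_DEAD
	 * and broadcasts on pause_free_cv, which we wait on below before
	 * marking the slot PAUSE_IDLE again.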
885 */ 886 if ((t = cp->cpu_pause_thread) == NULL) { 887 ASSERT(safe_list[cpun] == PAUSE_IDLE); 888 return; 889 } 890 thread_lock(t); 891 t->t_cpu = CPU; /* disp gets upset if last cpu is quiesced. */ 892 t->t_bound_cpu = NULL; /* Must un-bind; cpu may not be running. */ 893 t->t_pri = v.v_nglobpris - 1; 894 ASSERT(safe_list[cpun] == PAUSE_IDLE); 895 safe_list[cpun] = PAUSE_DIE; 896 THREAD_TRANSITION(t); 897 setbackdq(t); 898 thread_unlock_nopreempt(t); 899 900 /* 901 * If we don't wait for the thread to actually die, it may try to 902 * run on the wrong cpu as part of an actual call to pause_cpus(). 903 */ 904 mutex_enter(&pause_free_mutex); 905 while (safe_list[cpun] != PAUSE_DEAD) { 906 cv_wait(&pause_free_cv, &pause_free_mutex); 907 } 908 mutex_exit(&pause_free_mutex); 909 safe_list[cpun] = PAUSE_IDLE; 910 911 cp->cpu_pause_thread = NULL; 912 } 913 914 /* 915 * Initialize basic structures for pausing CPUs. 916 */ 917 void 918 cpu_pause_init() 919 { 920 sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL); 921 /* 922 * Create initial CPU pause thread. 923 */ 924 cpu_pause_alloc(CPU); 925 } 926 927 /* 928 * Start the threads used to pause another CPU. 929 */ 930 static int 931 cpu_pause_start(processorid_t cpu_id) 932 { 933 int i; 934 int cpu_count = 0; 935 936 for (i = 0; i < NCPU; i++) { 937 cpu_t *cp; 938 kthread_id_t t; 939 940 cp = cpu[i]; 941 if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) { 942 safe_list[i] = PAUSE_WAIT; 943 continue; 944 } 945 946 /* 947 * Skip CPU if it is quiesced or not yet started. 948 */ 949 if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) { 950 safe_list[i] = PAUSE_WAIT; 951 continue; 952 } 953 954 /* 955 * Start this CPU's pause thread. 956 */ 957 t = cp->cpu_pause_thread; 958 thread_lock(t); 959 /* 960 * Reset the priority, since nglobpris may have 961 * changed since the thread was created, if someone 962 * has loaded the RT (or some other) scheduling 963 * class. 964 */ 965 t->t_pri = v.v_nglobpris - 1; 966 THREAD_TRANSITION(t); 967 setbackdq(t); 968 thread_unlock_nopreempt(t); 969 ++cpu_count; 970 } 971 return (cpu_count); 972 } 973 974 975 /* 976 * Pause all of the CPUs except the one we are on by creating a high 977 * priority thread bound to those CPUs. 978 * 979 * Note that one must be extremely careful regarding code 980 * executed while CPUs are paused. Since a CPU may be paused 981 * while a thread scheduling on that CPU is holding an adaptive 982 * lock, code executed with CPUs paused must not acquire adaptive 983 * (or low-level spin) locks. Also, such code must not block, 984 * since the thread that is supposed to initiate the wakeup may 985 * never run. 986 * 987 * With a few exceptions, the restrictions on code executed with CPUs 988 * paused match those for code executed at high-level interrupt 989 * context. 990 */ 991 void 992 pause_cpus(cpu_t *off_cp, void *(*func)(void *)) 993 { 994 processorid_t cpu_id; 995 int i; 996 struct _cpu_pause_info *cpi = &cpu_pause_info; 997 998 ASSERT(MUTEX_HELD(&cpu_lock)); 999 ASSERT(cpi->cp_paused == NULL); 1000 cpi->cp_count = 0; 1001 cpi->cp_go = 0; 1002 for (i = 0; i < NCPU; i++) 1003 safe_list[i] = PAUSE_IDLE; 1004 kpreempt_disable(); 1005 1006 cpi->cp_func = func; 1007 1008 /* 1009 * If running on the cpu that is going offline, get off it. 1010 * This is so that it won't be necessary to rechoose a CPU 1011 * when done. 
1012 */ 1013 if (CPU == off_cp) 1014 cpu_id = off_cp->cpu_next_part->cpu_id; 1015 else 1016 cpu_id = CPU->cpu_id; 1017 affinity_set(cpu_id); 1018 1019 /* 1020 * Start the pause threads and record how many were started 1021 */ 1022 cpi->cp_count = cpu_pause_start(cpu_id); 1023 1024 /* 1025 * Now wait for all CPUs to be running the pause thread. 1026 */ 1027 while (cpi->cp_count > 0) { 1028 /* 1029 * Spin reading the count without grabbing the disp 1030 * lock to make sure we don't prevent the pause 1031 * threads from getting the lock. 1032 */ 1033 while (sema_held(&cpi->cp_sem)) 1034 ; 1035 if (sema_tryp(&cpi->cp_sem)) 1036 --cpi->cp_count; 1037 } 1038 cpi->cp_go = 1; /* all have reached cpu_pause */ 1039 1040 /* 1041 * Now wait for all CPUs to spl. (Transition from PAUSE_READY 1042 * to PAUSE_WAIT.) 1043 */ 1044 for (i = 0; i < NCPU; i++) { 1045 while (safe_list[i] != PAUSE_WAIT) 1046 ; 1047 } 1048 cpi->cp_spl = splhigh(); /* block dispatcher on this CPU */ 1049 cpi->cp_paused = curthread; 1050 } 1051 1052 /* 1053 * Check whether the current thread has CPUs paused 1054 */ 1055 int 1056 cpus_paused(void) 1057 { 1058 if (cpu_pause_info.cp_paused != NULL) { 1059 ASSERT(cpu_pause_info.cp_paused == curthread); 1060 return (1); 1061 } 1062 return (0); 1063 } 1064 1065 static cpu_t * 1066 cpu_get_all(processorid_t cpun) 1067 { 1068 ASSERT(MUTEX_HELD(&cpu_lock)); 1069 1070 if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun)) 1071 return (NULL); 1072 return (cpu[cpun]); 1073 } 1074 1075 /* 1076 * Check whether cpun is a valid processor id and whether it should be 1077 * visible from the current zone. If it is, return a pointer to the 1078 * associated CPU structure. 1079 */ 1080 cpu_t * 1081 cpu_get(processorid_t cpun) 1082 { 1083 cpu_t *c; 1084 1085 ASSERT(MUTEX_HELD(&cpu_lock)); 1086 c = cpu_get_all(cpun); 1087 if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() && 1088 zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c)) 1089 return (NULL); 1090 return (c); 1091 } 1092 1093 /* 1094 * The following functions should be used to check CPU states in the kernel. 1095 * They should be invoked with cpu_lock held. Kernel subsystems interested 1096 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc 1097 * states. Those are for user-land (and system call) use only. 1098 */ 1099 1100 /* 1101 * Determine whether the CPU is online and handling interrupts. 1102 */ 1103 int 1104 cpu_is_online(cpu_t *cpu) 1105 { 1106 ASSERT(MUTEX_HELD(&cpu_lock)); 1107 return (cpu_flagged_online(cpu->cpu_flags)); 1108 } 1109 1110 /* 1111 * Determine whether the CPU is offline (this includes spare and faulted). 1112 */ 1113 int 1114 cpu_is_offline(cpu_t *cpu) 1115 { 1116 ASSERT(MUTEX_HELD(&cpu_lock)); 1117 return (cpu_flagged_offline(cpu->cpu_flags)); 1118 } 1119 1120 /* 1121 * Determine whether the CPU is powered off. 1122 */ 1123 int 1124 cpu_is_poweredoff(cpu_t *cpu) 1125 { 1126 ASSERT(MUTEX_HELD(&cpu_lock)); 1127 return (cpu_flagged_poweredoff(cpu->cpu_flags)); 1128 } 1129 1130 /* 1131 * Determine whether the CPU is handling interrupts. 1132 */ 1133 int 1134 cpu_is_nointr(cpu_t *cpu) 1135 { 1136 ASSERT(MUTEX_HELD(&cpu_lock)); 1137 return (cpu_flagged_nointr(cpu->cpu_flags)); 1138 } 1139 1140 /* 1141 * Determine whether the CPU is active (scheduling threads). 
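 * Like the other predicates above, this must be called with cpu_lock held;
 * an illustrative check is simply:
 *
 *	mutex_enter(&cpu_lock);
 *	if ((cp = cpu_get(id)) != NULL && cpu_is_active(cp))
 *		...
 *	mutex_exit(&cpu_lock);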
1142 */ 1143 int 1144 cpu_is_active(cpu_t *cpu) 1145 { 1146 ASSERT(MUTEX_HELD(&cpu_lock)); 1147 return (cpu_flagged_active(cpu->cpu_flags)); 1148 } 1149 1150 /* 1151 * Same as above, but these require cpu_flags instead of cpu_t pointers. 1152 */ 1153 int 1154 cpu_flagged_online(cpu_flag_t cpu_flags) 1155 { 1156 return (cpu_flagged_active(cpu_flags) && 1157 (cpu_flags & CPU_ENABLE)); 1158 } 1159 1160 int 1161 cpu_flagged_offline(cpu_flag_t cpu_flags) 1162 { 1163 return (((cpu_flags & CPU_POWEROFF) == 0) && 1164 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY)); 1165 } 1166 1167 int 1168 cpu_flagged_poweredoff(cpu_flag_t cpu_flags) 1169 { 1170 return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF); 1171 } 1172 1173 int 1174 cpu_flagged_nointr(cpu_flag_t cpu_flags) 1175 { 1176 return (cpu_flagged_active(cpu_flags) && 1177 (cpu_flags & CPU_ENABLE) == 0); 1178 } 1179 1180 int 1181 cpu_flagged_active(cpu_flag_t cpu_flags) 1182 { 1183 return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) && 1184 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY)); 1185 } 1186 1187 /* 1188 * Bring the indicated CPU online. 1189 */ 1190 int 1191 cpu_online(cpu_t *cp) 1192 { 1193 int error = 0; 1194 1195 /* 1196 * Handle on-line request. 1197 * This code must put the new CPU on the active list before 1198 * starting it because it will not be paused, and will start 1199 * using the active list immediately. The real start occurs 1200 * when the CPU_QUIESCED flag is turned off. 1201 */ 1202 1203 ASSERT(MUTEX_HELD(&cpu_lock)); 1204 1205 /* 1206 * Put all the cpus into a known safe place. 1207 * No mutexes can be entered while CPUs are paused. 1208 */ 1209 error = mp_cpu_start(cp); /* arch-dep hook */ 1210 if (error == 0) { 1211 pg_cpupart_in(cp, cp->cpu_part); 1212 pause_cpus(NULL, NULL); 1213 cpu_add_active_internal(cp); 1214 if (cp->cpu_flags & CPU_FAULTED) { 1215 cp->cpu_flags &= ~CPU_FAULTED; 1216 mp_cpu_faulted_exit(cp); 1217 } 1218 cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN | 1219 CPU_SPARE); 1220 CPU_NEW_GENERATION(cp); 1221 start_cpus(); 1222 cpu_stats_kstat_create(cp); 1223 cpu_create_intrstat(cp); 1224 lgrp_kstat_create(cp); 1225 cpu_state_change_notify(cp->cpu_id, CPU_ON); 1226 cpu_intr_enable(cp); /* arch-dep hook */ 1227 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON); 1228 cpu_set_state(cp); 1229 cyclic_online(cp); 1230 /* 1231 * This has to be called only after cyclic_online(). This 1232 * function uses cyclics. 1233 */ 1234 callout_cpu_online(cp); 1235 poke_cpu(cp->cpu_id); 1236 } 1237 1238 return (error); 1239 } 1240 1241 /* 1242 * Take the indicated CPU offline. 1243 */ 1244 int 1245 cpu_offline(cpu_t *cp, int flags) 1246 { 1247 cpupart_t *pp; 1248 int error = 0; 1249 cpu_t *ncp; 1250 int intr_enable; 1251 int cyclic_off = 0; 1252 int callout_off = 0; 1253 int loop_count; 1254 int no_quiesce = 0; 1255 int (*bound_func)(struct cpu *, int); 1256 kthread_t *t; 1257 lpl_t *cpu_lpl; 1258 proc_t *p; 1259 int lgrp_diff_lpl; 1260 boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0; 1261 1262 ASSERT(MUTEX_HELD(&cpu_lock)); 1263 1264 /* 1265 * If we're going from faulted or spare to offline, just 1266 * clear these flags and update CPU state. 1267 */ 1268 if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) { 1269 if (cp->cpu_flags & CPU_FAULTED) { 1270 cp->cpu_flags &= ~CPU_FAULTED; 1271 mp_cpu_faulted_exit(cp); 1272 } 1273 cp->cpu_flags &= ~CPU_SPARE; 1274 cpu_set_state(cp); 1275 return (0); 1276 } 1277 1278 /* 1279 * Handle off-line request. 
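	 * The sequence below is, roughly: refuse to take the last online CPU
	 * in the partition, unbind threads, announce CPU_OFF, take the CPU
	 * out of interrupt participation, raise the cpu_inmotion weak-binding
	 * barrier, wait for bound threads to drain, then pause_cpus() and
	 * unlink the CPU from the active list, rehoming any threads whose
	 * home lgroup or last-ran CPU was affected.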
	 */
	pp = cp->cpu_part;
	/*
	 * Don't offline last online CPU in partition
	 */
	if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
		return (EBUSY);
	/*
	 * Unbind all soft-bound threads bound to our CPU and hard bound
	 * threads if we were asked to.
	 */
	error = cpu_unbind(cp->cpu_id, unbind_all_threads);
	if (error != 0)
		return (error);
	/*
	 * We shouldn't be bound to this CPU ourselves.
	 */
	if (curthread->t_bound_cpu == cp)
		return (EBUSY);

	/*
	 * Tell interested parties that this CPU is going offline.
	 */
	CPU_NEW_GENERATION(cp);
	cpu_state_change_notify(cp->cpu_id, CPU_OFF);

	/*
	 * Tell the PG subsystem that the CPU is leaving the partition
	 */
	pg_cpupart_out(cp, pp);

	/*
	 * Take the CPU out of interrupt participation so we won't find
	 * bound kernel threads.  If the architecture cannot completely
	 * shut off interrupts on the CPU, don't quiesce it, but don't
	 * run anything but interrupt threads... this is indicated by
	 * the CPU_OFFLINE flag being on but the CPU_QUIESCED flag being
	 * off.
	 */
	intr_enable = cp->cpu_flags & CPU_ENABLE;
	if (intr_enable)
		no_quiesce = cpu_intr_disable(cp);

	/*
	 * Record that we are aiming to offline this cpu.  This acts as
	 * a barrier to further weak binding requests in thread_nomigrate
	 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
	 * lean away from this cpu.  Further strong bindings are already
	 * avoided since we hold cpu_lock.  Since threads that are set
	 * runnable around now and others coming off the target cpu are
	 * directed away from the target, existing strong and weak bindings
	 * (especially the latter) to the target cpu stand maximum chance of
	 * being able to unbind during the short delay loop below (if other
	 * unbound threads compete they may not see cpu in time to unbind
	 * even if they would do so immediately).
	 */
	cpu_inmotion = cp;
	membar_enter();

	/*
	 * Check for kernel threads (strong or weak) bound to that CPU.
	 * Strongly bound threads may not unbind, and we'll have to return
	 * EBUSY.  Weakly bound threads should always disappear - we've
	 * stopped more weak binding with cpu_inmotion and existing
	 * bindings will drain imminently (they may not block).  Nonetheless
	 * we will wait for a fixed period for all bound threads to disappear.
	 * Inactive interrupt threads are OK (they'll be in TS_FREE
	 * state).  If the test finds some bound threads, wait a few ticks
	 * to give short-lived threads (such as interrupts) a chance to
	 * complete.  Note that if no_quiesce is set, i.e. this cpu
	 * is required to service interrupts, then we take the route
	 * that permits interrupt threads to be active (or bypassed).
	 */
	bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;

again:	for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
		if (loop_count >= 5) {
			error = EBUSY;	/* some threads still bound */
			break;
		}

		/*
		 * If some threads were assigned, give them
		 * a chance to complete or move.
		 *
		 * This assumes that the clock_thread is not bound
		 * to any CPU, because the clock_thread is needed to
		 * do the delay(hz/100).
		 *
		 * Note: we still hold the cpu_lock while waiting for
		 * the next clock tick.
This is OK since it isn't 1371 * needed for anything else except processor_bind(2), 1372 * and system initialization. If we drop the lock, 1373 * we would risk another p_online disabling the last 1374 * processor. 1375 */ 1376 delay(hz/100); 1377 } 1378 1379 if (error == 0 && callout_off == 0) { 1380 callout_cpu_offline(cp); 1381 callout_off = 1; 1382 } 1383 1384 if (error == 0 && cyclic_off == 0) { 1385 if (!cyclic_offline(cp)) { 1386 /* 1387 * We must have bound cyclics... 1388 */ 1389 error = EBUSY; 1390 goto out; 1391 } 1392 cyclic_off = 1; 1393 } 1394 1395 /* 1396 * Call mp_cpu_stop() to perform any special operations 1397 * needed for this machine architecture to offline a CPU. 1398 */ 1399 if (error == 0) 1400 error = mp_cpu_stop(cp); /* arch-dep hook */ 1401 1402 /* 1403 * If that all worked, take the CPU offline and decrement 1404 * ncpus_online. 1405 */ 1406 if (error == 0) { 1407 /* 1408 * Put all the cpus into a known safe place. 1409 * No mutexes can be entered while CPUs are paused. 1410 */ 1411 pause_cpus(cp, NULL); 1412 /* 1413 * Repeat the operation, if necessary, to make sure that 1414 * all outstanding low-level interrupts run to completion 1415 * before we set the CPU_QUIESCED flag. It's also possible 1416 * that a thread has weak bound to the cpu despite our raising 1417 * cpu_inmotion above since it may have loaded that 1418 * value before the barrier became visible (this would have 1419 * to be the thread that was on the target cpu at the time 1420 * we raised the barrier). 1421 */ 1422 if ((!no_quiesce && cp->cpu_intr_actv != 0) || 1423 (*bound_func)(cp, 1)) { 1424 start_cpus(); 1425 (void) mp_cpu_start(cp); 1426 goto again; 1427 } 1428 ncp = cp->cpu_next_part; 1429 cpu_lpl = cp->cpu_lpl; 1430 ASSERT(cpu_lpl != NULL); 1431 1432 /* 1433 * Remove the CPU from the list of active CPUs. 1434 */ 1435 cpu_remove_active(cp); 1436 1437 /* 1438 * Walk the active process list and look for threads 1439 * whose home lgroup needs to be updated, or 1440 * the last CPU they run on is the one being offlined now. 1441 */ 1442 1443 ASSERT(curthread->t_cpu != cp); 1444 for (p = practive; p != NULL; p = p->p_next) { 1445 1446 t = p->p_tlist; 1447 1448 if (t == NULL) 1449 continue; 1450 1451 lgrp_diff_lpl = 0; 1452 1453 do { 1454 ASSERT(t->t_lpl != NULL); 1455 /* 1456 * Taking last CPU in lpl offline 1457 * Rehome thread if it is in this lpl 1458 * Otherwise, update the count of how many 1459 * threads are in this CPU's lgroup but have 1460 * a different lpl. 1461 */ 1462 1463 if (cpu_lpl->lpl_ncpu == 0) { 1464 if (t->t_lpl == cpu_lpl) 1465 lgrp_move_thread(t, 1466 lgrp_choose(t, 1467 t->t_cpupart), 0); 1468 else if (t->t_lpl->lpl_lgrpid == 1469 cpu_lpl->lpl_lgrpid) 1470 lgrp_diff_lpl++; 1471 } 1472 ASSERT(t->t_lpl->lpl_ncpu > 0); 1473 1474 /* 1475 * Update CPU last ran on if it was this CPU 1476 */ 1477 if (t->t_cpu == cp && t->t_bound_cpu != cp) 1478 t->t_cpu = disp_lowpri_cpu(ncp, 1479 t->t_lpl, t->t_pri, NULL); 1480 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp || 1481 t->t_weakbound_cpu == cp); 1482 1483 t = t->t_forw; 1484 } while (t != p->p_tlist); 1485 1486 /* 1487 * Didn't find any threads in the same lgroup as this 1488 * CPU with a different lpl, so remove the lgroup from 1489 * the process lgroup bitmask. 1490 */ 1491 1492 if (lgrp_diff_lpl == 0) 1493 klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid); 1494 } 1495 1496 /* 1497 * Walk thread list looking for threads that need to be 1498 * rehomed, since there are some threads that are not in 1499 * their process's p_tlist. 
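		 * (Kernel-only threads created with thread_create() and never
		 * given an lwp are an example; they are not linked onto any
		 * p_tlist, so the walk below starts at curthread and follows
		 * t_next around the list of all threads instead.)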
1500 */ 1501 1502 t = curthread; 1503 do { 1504 ASSERT(t != NULL && t->t_lpl != NULL); 1505 1506 /* 1507 * Rehome threads with same lpl as this CPU when this 1508 * is the last CPU in the lpl. 1509 */ 1510 1511 if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl)) 1512 lgrp_move_thread(t, 1513 lgrp_choose(t, t->t_cpupart), 1); 1514 1515 ASSERT(t->t_lpl->lpl_ncpu > 0); 1516 1517 /* 1518 * Update CPU last ran on if it was this CPU 1519 */ 1520 1521 if (t->t_cpu == cp && t->t_bound_cpu != cp) { 1522 t->t_cpu = disp_lowpri_cpu(ncp, 1523 t->t_lpl, t->t_pri, NULL); 1524 } 1525 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp || 1526 t->t_weakbound_cpu == cp); 1527 t = t->t_next; 1528 1529 } while (t != curthread); 1530 ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0); 1531 cp->cpu_flags |= CPU_OFFLINE; 1532 disp_cpu_inactive(cp); 1533 if (!no_quiesce) 1534 cp->cpu_flags |= CPU_QUIESCED; 1535 ncpus_online--; 1536 cpu_set_state(cp); 1537 cpu_inmotion = NULL; 1538 start_cpus(); 1539 cpu_stats_kstat_destroy(cp); 1540 cpu_delete_intrstat(cp); 1541 lgrp_kstat_destroy(cp); 1542 } 1543 1544 out: 1545 cpu_inmotion = NULL; 1546 1547 /* 1548 * If we failed, re-enable interrupts. 1549 * Do this even if cpu_intr_disable returned an error, because 1550 * it may have partially disabled interrupts. 1551 */ 1552 if (error && intr_enable) 1553 cpu_intr_enable(cp); 1554 1555 /* 1556 * If we failed, but managed to offline the cyclic subsystem on this 1557 * CPU, bring it back online. 1558 */ 1559 if (error && cyclic_off) 1560 cyclic_online(cp); 1561 1562 /* 1563 * If we failed, but managed to offline callouts on this CPU, 1564 * bring it back online. 1565 */ 1566 if (error && callout_off) 1567 callout_cpu_online(cp); 1568 1569 /* 1570 * If we failed, tell the PG subsystem that the CPU is back 1571 */ 1572 pg_cpupart_in(cp, pp); 1573 1574 /* 1575 * If we failed, we need to notify everyone that this CPU is back on. 1576 */ 1577 if (error != 0) { 1578 CPU_NEW_GENERATION(cp); 1579 cpu_state_change_notify(cp->cpu_id, CPU_ON); 1580 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON); 1581 } 1582 1583 return (error); 1584 } 1585 1586 /* 1587 * Mark the indicated CPU as faulted, taking it offline. 1588 */ 1589 int 1590 cpu_faulted(cpu_t *cp, int flags) 1591 { 1592 int error = 0; 1593 1594 ASSERT(MUTEX_HELD(&cpu_lock)); 1595 ASSERT(!cpu_is_poweredoff(cp)); 1596 1597 if (cpu_is_offline(cp)) { 1598 cp->cpu_flags &= ~CPU_SPARE; 1599 cp->cpu_flags |= CPU_FAULTED; 1600 mp_cpu_faulted_enter(cp); 1601 cpu_set_state(cp); 1602 return (0); 1603 } 1604 1605 if ((error = cpu_offline(cp, flags)) == 0) { 1606 cp->cpu_flags |= CPU_FAULTED; 1607 mp_cpu_faulted_enter(cp); 1608 cpu_set_state(cp); 1609 } 1610 1611 return (error); 1612 } 1613 1614 /* 1615 * Mark the indicated CPU as a spare, taking it offline. 1616 */ 1617 int 1618 cpu_spare(cpu_t *cp, int flags) 1619 { 1620 int error = 0; 1621 1622 ASSERT(MUTEX_HELD(&cpu_lock)); 1623 ASSERT(!cpu_is_poweredoff(cp)); 1624 1625 if (cpu_is_offline(cp)) { 1626 if (cp->cpu_flags & CPU_FAULTED) { 1627 cp->cpu_flags &= ~CPU_FAULTED; 1628 mp_cpu_faulted_exit(cp); 1629 } 1630 cp->cpu_flags |= CPU_SPARE; 1631 cpu_set_state(cp); 1632 return (0); 1633 } 1634 1635 if ((error = cpu_offline(cp, flags)) == 0) { 1636 cp->cpu_flags |= CPU_SPARE; 1637 cpu_set_state(cp); 1638 } 1639 1640 return (error); 1641 } 1642 1643 /* 1644 * Take the indicated CPU from poweroff to offline. 
 */
int
cpu_poweron(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_poweredoff(cp));

	error = mp_cpu_poweron(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Take the indicated CPU from any inactive state to powered off.
 */
int
cpu_poweroff(cpu_t *cp)
{
	int	error = ENOTSUP;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_is_offline(cp));

	if (!(cp->cpu_flags & CPU_QUIESCED))
		return (EBUSY);		/* not completely idle */

	error = mp_cpu_poweroff(cp);	/* arch-dep hook */
	if (error == 0)
		cpu_set_state(cp);

	return (error);
}

/*
 * Initialize the sequential CPU id lookup table.
 */
void
cpu_seq_tbl_init()
{
	cpu_t	**tbl;

	tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
	tbl[0] = CPU;

	cpu_seq = tbl;
}

/*
 * Initialize the CPU lists for the first CPU.
 */
void
cpu_list_init(cpu_t *cp)
{
	cp->cpu_next = cp;
	cp->cpu_prev = cp;
	cpu_list = cp;
	clock_cpu_list = cp;

	cp->cpu_next_onln = cp;
	cp->cpu_prev_onln = cp;
	cpu_active = cp;

	cp->cpu_seqid = 0;
	CPUSET_ADD(cpu_seqid_inuse, 0);

	/*
	 * Bootstrap cpu_seq using cpu_list.
	 * The cpu_seq[] table will be dynamically allocated
	 * when kmem later becomes available (but before going MP).
	 */
	cpu_seq = &cpu_list;

	cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
	cp_default.cp_cpulist = cp;
	cp_default.cp_ncpus = 1;
	cp->cpu_next_part = cp;
	cp->cpu_prev_part = cp;
	cp->cpu_part = &cp_default;

	CPUSET_ADD(cpu_available, cp->cpu_id);
	CPUSET_ADD(cpu_active_set, cp->cpu_id);
}

/*
 * Insert a CPU into the list of available CPUs.
 */
void
cpu_add_unit(cpu_t *cp)
{
	int seqid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu_list != NULL);	/* list started in cpu_list_init */

	lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);

	/*
	 * Note: most users of the cpu_list will grab the
	 * cpu_lock to ensure that it isn't modified.  However,
	 * certain users can't or won't do that.  To allow this
	 * we pause the other cpus.  Users who walk the list
	 * without cpu_lock must disable kernel preemption
	 * to ensure that the list isn't modified underneath
	 * them.  Also, any cached pointers to cpu structures
	 * must be revalidated by checking that the cpu_next
	 * pointer has not been cleared to NULL (which signals
	 * that the cpu has been deleted; see cpu_del_unit()).
	 * This check must be done with the cpu_lock held or
	 * kernel preemption disabled.  This check relies upon
	 * the fact that old cpu structures are not freed or
	 * cleared after they are removed from the cpu_list.
	 *
	 * Note that the clock code walks the cpu list dereferencing
	 * the cpu_part pointer, so we need to initialize it before
	 * adding the cpu to the list.
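	 *
	 * As an illustrative sketch (not compiled here), a walk that follows
	 * these rules without taking cpu_lock looks like:
	 *
	 *	kpreempt_disable();
	 *	cp = cpu_list;
	 *	do {
	 *		... examine cp; do not cache it past the loop ...
	 *		cp = cp->cpu_next;
	 *	} while (cp != cpu_list);
	 *	kpreempt_enable();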
1763 */ 1764 cp->cpu_part = &cp_default; 1765 pause_cpus(NULL, NULL); 1766 cp->cpu_next = cpu_list; 1767 cp->cpu_prev = cpu_list->cpu_prev; 1768 cpu_list->cpu_prev->cpu_next = cp; 1769 cpu_list->cpu_prev = cp; 1770 start_cpus(); 1771 1772 for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++) 1773 continue; 1774 CPUSET_ADD(cpu_seqid_inuse, seqid); 1775 cp->cpu_seqid = seqid; 1776 1777 if (seqid > max_cpu_seqid_ever) 1778 max_cpu_seqid_ever = seqid; 1779 1780 ASSERT(ncpus < max_ncpus); 1781 ncpus++; 1782 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid); 1783 cpu[cp->cpu_id] = cp; 1784 CPUSET_ADD(cpu_available, cp->cpu_id); 1785 cpu_seq[cp->cpu_seqid] = cp; 1786 1787 /* 1788 * allocate a pause thread for this CPU. 1789 */ 1790 cpu_pause_alloc(cp); 1791 1792 /* 1793 * So that new CPUs won't have NULL prev_onln and next_onln pointers, 1794 * link them into a list of just that CPU. 1795 * This is so that disp_lowpri_cpu will work for thread_create in 1796 * pause_cpus() when called from the startup thread in a new CPU. 1797 */ 1798 cp->cpu_next_onln = cp; 1799 cp->cpu_prev_onln = cp; 1800 cpu_info_kstat_create(cp); 1801 cp->cpu_next_part = cp; 1802 cp->cpu_prev_part = cp; 1803 1804 init_cpu_mstate(cp, CMS_SYSTEM); 1805 1806 pool_pset_mod = gethrtime(); 1807 } 1808 1809 /* 1810 * Do the opposite of cpu_add_unit(). 1811 */ 1812 void 1813 cpu_del_unit(int cpuid) 1814 { 1815 struct cpu *cp, *cpnext; 1816 1817 ASSERT(MUTEX_HELD(&cpu_lock)); 1818 cp = cpu[cpuid]; 1819 ASSERT(cp != NULL); 1820 1821 ASSERT(cp->cpu_next_onln == cp); 1822 ASSERT(cp->cpu_prev_onln == cp); 1823 ASSERT(cp->cpu_next_part == cp); 1824 ASSERT(cp->cpu_prev_part == cp); 1825 1826 /* 1827 * Tear down the CPU's physical ID cache, and update any 1828 * processor groups 1829 */ 1830 pg_cpu_fini(cp, NULL); 1831 pghw_physid_destroy(cp); 1832 1833 /* 1834 * Destroy kstat stuff. 1835 */ 1836 cpu_info_kstat_destroy(cp); 1837 term_cpu_mstate(cp); 1838 /* 1839 * Free up pause thread. 1840 */ 1841 cpu_pause_free(cp); 1842 CPUSET_DEL(cpu_available, cp->cpu_id); 1843 cpu[cp->cpu_id] = NULL; 1844 cpu_seq[cp->cpu_seqid] = NULL; 1845 1846 /* 1847 * The clock thread and mutex_vector_enter cannot hold the 1848 * cpu_lock while traversing the cpu list, therefore we pause 1849 * all other threads by pausing the other cpus. These, and any 1850 * other routines holding cpu pointers while possibly sleeping 1851 * must be sure to call kpreempt_disable before processing the 1852 * list and be sure to check that the cpu has not been deleted 1853 * after any sleeps (check cp->cpu_next != NULL). We guarantee 1854 * to keep the deleted cpu structure around. 1855 * 1856 * Note that this MUST be done AFTER cpu_available 1857 * has been updated so that we don't waste time 1858 * trying to pause the cpu we're trying to delete. 1859 */ 1860 pause_cpus(NULL, NULL); 1861 1862 cpnext = cp->cpu_next; 1863 cp->cpu_prev->cpu_next = cp->cpu_next; 1864 cp->cpu_next->cpu_prev = cp->cpu_prev; 1865 if (cp == cpu_list) 1866 cpu_list = cpnext; 1867 1868 /* 1869 * Signals that the cpu has been deleted (see above). 1870 */ 1871 cp->cpu_next = NULL; 1872 cp->cpu_prev = NULL; 1873 1874 start_cpus(); 1875 1876 CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid); 1877 ncpus--; 1878 lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0); 1879 1880 pool_pset_mod = gethrtime(); 1881 } 1882 1883 /* 1884 * Add a CPU to the list of active CPUs. 1885 * This routine must not get any locks, because other CPUs are paused. 
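 * Callers (cpu_online() and cpu_add_active()) invoke it with cpu_lock held,
 * between pause_cpus() and start_cpus().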
1886 */ 1887 static void 1888 cpu_add_active_internal(cpu_t *cp) 1889 { 1890 cpupart_t *pp = cp->cpu_part; 1891 1892 ASSERT(MUTEX_HELD(&cpu_lock)); 1893 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */ 1894 1895 ncpus_online++; 1896 cpu_set_state(cp); 1897 cp->cpu_next_onln = cpu_active; 1898 cp->cpu_prev_onln = cpu_active->cpu_prev_onln; 1899 cpu_active->cpu_prev_onln->cpu_next_onln = cp; 1900 cpu_active->cpu_prev_onln = cp; 1901 CPUSET_ADD(cpu_active_set, cp->cpu_id); 1902 1903 if (pp->cp_cpulist) { 1904 cp->cpu_next_part = pp->cp_cpulist; 1905 cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part; 1906 pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp; 1907 pp->cp_cpulist->cpu_prev_part = cp; 1908 } else { 1909 ASSERT(pp->cp_ncpus == 0); 1910 pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp; 1911 } 1912 pp->cp_ncpus++; 1913 if (pp->cp_ncpus == 1) { 1914 cp_numparts_nonempty++; 1915 ASSERT(cp_numparts_nonempty != 0); 1916 } 1917 1918 pg_cpu_active(cp); 1919 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0); 1920 1921 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg)); 1922 } 1923 1924 /* 1925 * Add a CPU to the list of active CPUs. 1926 * This is called from machine-dependent layers when a new CPU is started. 1927 */ 1928 void 1929 cpu_add_active(cpu_t *cp) 1930 { 1931 pg_cpupart_in(cp, cp->cpu_part); 1932 1933 pause_cpus(NULL, NULL); 1934 cpu_add_active_internal(cp); 1935 start_cpus(); 1936 1937 cpu_stats_kstat_create(cp); 1938 cpu_create_intrstat(cp); 1939 lgrp_kstat_create(cp); 1940 cpu_state_change_notify(cp->cpu_id, CPU_INIT); 1941 } 1942 1943 1944 /* 1945 * Remove a CPU from the list of active CPUs. 1946 * This routine must not get any locks, because other CPUs are paused. 1947 */ 1948 /* ARGSUSED */ 1949 static void 1950 cpu_remove_active(cpu_t *cp) 1951 { 1952 cpupart_t *pp = cp->cpu_part; 1953 1954 ASSERT(MUTEX_HELD(&cpu_lock)); 1955 ASSERT(cp->cpu_next_onln != cp); /* not the last one */ 1956 ASSERT(cp->cpu_prev_onln != cp); /* not the last one */ 1957 1958 pg_cpu_inactive(cp); 1959 1960 lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0); 1961 1962 if (cp == clock_cpu_list) 1963 clock_cpu_list = cp->cpu_next_onln; 1964 1965 cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln; 1966 cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln; 1967 if (cpu_active == cp) { 1968 cpu_active = cp->cpu_next_onln; 1969 } 1970 cp->cpu_next_onln = cp; 1971 cp->cpu_prev_onln = cp; 1972 CPUSET_DEL(cpu_active_set, cp->cpu_id); 1973 1974 cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part; 1975 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part; 1976 if (pp->cp_cpulist == cp) { 1977 pp->cp_cpulist = cp->cpu_next_part; 1978 ASSERT(pp->cp_cpulist != cp); 1979 } 1980 cp->cpu_next_part = cp; 1981 cp->cpu_prev_part = cp; 1982 pp->cp_ncpus--; 1983 if (pp->cp_ncpus == 0) { 1984 cp_numparts_nonempty--; 1985 ASSERT(cp_numparts_nonempty != 0); 1986 } 1987 } 1988 1989 /* 1990 * Routine used to setup a newly inserted CPU in preparation for starting 1991 * it running code. 1992 */ 1993 int 1994 cpu_configure(int cpuid) 1995 { 1996 int retval = 0; 1997 1998 ASSERT(MUTEX_HELD(&cpu_lock)); 1999 2000 /* 2001 * Some structures are statically allocated based upon 2002 * the maximum number of cpus the system supports. Do not 2003 * try to add anything beyond this limit. 
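 * (That limit is NCPU; hence the range check on cpuid below.)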
2004 */ 2005 if (cpuid < 0 || cpuid >= NCPU) { 2006 return (EINVAL); 2007 } 2008 2009 if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) { 2010 return (EALREADY); 2011 } 2012 2013 if ((retval = mp_cpu_configure(cpuid)) != 0) { 2014 return (retval); 2015 } 2016 2017 cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF; 2018 cpu_set_state(cpu[cpuid]); 2019 retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG); 2020 if (retval != 0) 2021 (void) mp_cpu_unconfigure(cpuid); 2022 2023 return (retval); 2024 } 2025 2026 /* 2027 * Routine used to cleanup a CPU that has been powered off. This will 2028 * destroy all per-cpu information related to this cpu. 2029 */ 2030 int 2031 cpu_unconfigure(int cpuid) 2032 { 2033 int error; 2034 2035 ASSERT(MUTEX_HELD(&cpu_lock)); 2036 2037 if (cpu[cpuid] == NULL) { 2038 return (ENODEV); 2039 } 2040 2041 if (cpu[cpuid]->cpu_flags == 0) { 2042 return (EALREADY); 2043 } 2044 2045 if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) { 2046 return (EBUSY); 2047 } 2048 2049 if (cpu[cpuid]->cpu_props != NULL) { 2050 (void) nvlist_free(cpu[cpuid]->cpu_props); 2051 cpu[cpuid]->cpu_props = NULL; 2052 } 2053 2054 error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG); 2055 2056 if (error != 0) 2057 return (error); 2058 2059 return (mp_cpu_unconfigure(cpuid)); 2060 } 2061 2062 /* 2063 * Routines for registering and de-registering cpu_setup callback functions. 2064 * 2065 * Caller's context 2066 * These routines must not be called from a driver's attach(9E) or 2067 * detach(9E) entry point. 2068 * 2069 * NOTE: CPU callbacks should not block. They are called with cpu_lock held. 2070 */ 2071 2072 /* 2073 * Ideally, these would be dynamically allocated and put into a linked 2074 * list; however that is not feasible because the registration routine 2075 * has to be available before the kmem allocator is working (in fact, 2076 * it is called by the kmem allocator init code). In any case, there 2077 * are quite a few extra entries for future users. 2078 */ 2079 #define NCPU_SETUPS 20 2080 2081 struct cpu_setup { 2082 cpu_setup_func_t *func; 2083 void *arg; 2084 } cpu_setups[NCPU_SETUPS]; 2085 2086 void 2087 register_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2088 { 2089 int i; 2090 2091 ASSERT(MUTEX_HELD(&cpu_lock)); 2092 2093 for (i = 0; i < NCPU_SETUPS; i++) 2094 if (cpu_setups[i].func == NULL) 2095 break; 2096 if (i >= NCPU_SETUPS) 2097 cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries"); 2098 2099 cpu_setups[i].func = func; 2100 cpu_setups[i].arg = arg; 2101 } 2102 2103 void 2104 unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg) 2105 { 2106 int i; 2107 2108 ASSERT(MUTEX_HELD(&cpu_lock)); 2109 2110 for (i = 0; i < NCPU_SETUPS; i++) 2111 if ((cpu_setups[i].func == func) && 2112 (cpu_setups[i].arg == arg)) 2113 break; 2114 if (i >= NCPU_SETUPS) 2115 cmn_err(CE_PANIC, "Could not find cpu_setup callback to " 2116 "deregister"); 2117 2118 cpu_setups[i].func = NULL; 2119 cpu_setups[i].arg = 0; 2120 } 2121 2122 /* 2123 * Call any state change hooks for this CPU, ignore any errors. 2124 */ 2125 void 2126 cpu_state_change_notify(int id, cpu_setup_t what) 2127 { 2128 int i; 2129 2130 ASSERT(MUTEX_HELD(&cpu_lock)); 2131 2132 for (i = 0; i < NCPU_SETUPS; i++) { 2133 if (cpu_setups[i].func != NULL) { 2134 cpu_setups[i].func(what, id, cpu_setups[i].arg); 2135 } 2136 } 2137 } 2138 2139 /* 2140 * Call any state change hooks for this CPU, undo it if error found. 
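 * If the i-th callback fails, the callbacks that already ran are called
 * again with the `undo' event, in reverse slot order, before the error is
 * returned.
 *
 * For reference, a client of the cpu_setup facility above registers
 * roughly as follows (an illustrative sketch; my_cpu_cb is a hypothetical
 * callback, and like all cpu_setup callbacks it must not block):
 *
 *	static int
 *	my_cpu_cb(cpu_setup_t what, int id, void *arg)
 *	{
 *		switch (what) {
 *		case CPU_CONFIG:
 *		case CPU_UNCONFIG:
 *		case CPU_INIT:
 *			(react to the state change for CPU "id")
 *			break;
 *		default:
 *			break;
 *		}
 *		return (0);
 *	}
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(my_cpu_cb, arg);
 *	mutex_exit(&cpu_lock);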
2141 */ 2142 static int 2143 cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo) 2144 { 2145 int i; 2146 int retval = 0; 2147 2148 ASSERT(MUTEX_HELD(&cpu_lock)); 2149 2150 for (i = 0; i < NCPU_SETUPS; i++) { 2151 if (cpu_setups[i].func != NULL) { 2152 retval = cpu_setups[i].func(what, id, 2153 cpu_setups[i].arg); 2154 if (retval) { 2155 for (i--; i >= 0; i--) { 2156 if (cpu_setups[i].func != NULL) 2157 cpu_setups[i].func(undo, 2158 id, cpu_setups[i].arg); 2159 } 2160 break; 2161 } 2162 } 2163 } 2164 return (retval); 2165 } 2166 2167 /* 2168 * Export information about this CPU via the kstat mechanism. 2169 */ 2170 static struct { 2171 kstat_named_t ci_state; 2172 kstat_named_t ci_state_begin; 2173 kstat_named_t ci_cpu_type; 2174 kstat_named_t ci_fpu_type; 2175 kstat_named_t ci_clock_MHz; 2176 kstat_named_t ci_chip_id; 2177 kstat_named_t ci_implementation; 2178 kstat_named_t ci_brandstr; 2179 kstat_named_t ci_core_id; 2180 kstat_named_t ci_curr_clock_Hz; 2181 kstat_named_t ci_supp_freq_Hz; 2182 kstat_named_t ci_pg_id; 2183 #if defined(__sparcv9) 2184 kstat_named_t ci_device_ID; 2185 kstat_named_t ci_cpu_fru; 2186 #endif 2187 #if defined(__x86) 2188 kstat_named_t ci_vendorstr; 2189 kstat_named_t ci_family; 2190 kstat_named_t ci_model; 2191 kstat_named_t ci_step; 2192 kstat_named_t ci_clogid; 2193 kstat_named_t ci_pkg_core_id; 2194 kstat_named_t ci_ncpuperchip; 2195 kstat_named_t ci_ncoreperchip; 2196 kstat_named_t ci_max_cstates; 2197 kstat_named_t ci_curr_cstate; 2198 kstat_named_t ci_cacheid; 2199 kstat_named_t ci_sktstr; 2200 #endif 2201 } cpu_info_template = { 2202 { "state", KSTAT_DATA_CHAR }, 2203 { "state_begin", KSTAT_DATA_LONG }, 2204 { "cpu_type", KSTAT_DATA_CHAR }, 2205 { "fpu_type", KSTAT_DATA_CHAR }, 2206 { "clock_MHz", KSTAT_DATA_LONG }, 2207 { "chip_id", KSTAT_DATA_LONG }, 2208 { "implementation", KSTAT_DATA_STRING }, 2209 { "brand", KSTAT_DATA_STRING }, 2210 { "core_id", KSTAT_DATA_LONG }, 2211 { "current_clock_Hz", KSTAT_DATA_UINT64 }, 2212 { "supported_frequencies_Hz", KSTAT_DATA_STRING }, 2213 { "pg_id", KSTAT_DATA_LONG }, 2214 #if defined(__sparcv9) 2215 { "device_ID", KSTAT_DATA_UINT64 }, 2216 { "cpu_fru", KSTAT_DATA_STRING }, 2217 #endif 2218 #if defined(__x86) 2219 { "vendor_id", KSTAT_DATA_STRING }, 2220 { "family", KSTAT_DATA_INT32 }, 2221 { "model", KSTAT_DATA_INT32 }, 2222 { "stepping", KSTAT_DATA_INT32 }, 2223 { "clog_id", KSTAT_DATA_INT32 }, 2224 { "pkg_core_id", KSTAT_DATA_LONG }, 2225 { "ncpu_per_chip", KSTAT_DATA_INT32 }, 2226 { "ncore_per_chip", KSTAT_DATA_INT32 }, 2227 { "supported_max_cstates", KSTAT_DATA_INT32 }, 2228 { "current_cstate", KSTAT_DATA_INT32 }, 2229 { "cache_id", KSTAT_DATA_INT32 }, 2230 { "socket_type", KSTAT_DATA_STRING }, 2231 #endif 2232 }; 2233 2234 static kmutex_t cpu_info_template_lock; 2235 2236 static int 2237 cpu_info_kstat_update(kstat_t *ksp, int rw) 2238 { 2239 cpu_t *cp = ksp->ks_private; 2240 const char *pi_state; 2241 2242 if (rw == KSTAT_WRITE) 2243 return (EACCES); 2244 2245 #if defined(__x86) 2246 /* Is the cpu still initialising itself? 
*/ 2247 if (cpuid_checkpass(cp, 1) == 0) 2248 return (ENXIO); 2249 #endif 2250 switch (cp->cpu_type_info.pi_state) { 2251 case P_ONLINE: 2252 pi_state = PS_ONLINE; 2253 break; 2254 case P_POWEROFF: 2255 pi_state = PS_POWEROFF; 2256 break; 2257 case P_NOINTR: 2258 pi_state = PS_NOINTR; 2259 break; 2260 case P_FAULTED: 2261 pi_state = PS_FAULTED; 2262 break; 2263 case P_SPARE: 2264 pi_state = PS_SPARE; 2265 break; 2266 case P_OFFLINE: 2267 pi_state = PS_OFFLINE; 2268 break; 2269 default: 2270 pi_state = "unknown"; 2271 } 2272 (void) strcpy(cpu_info_template.ci_state.value.c, pi_state); 2273 cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin; 2274 (void) strncpy(cpu_info_template.ci_cpu_type.value.c, 2275 cp->cpu_type_info.pi_processor_type, 15); 2276 (void) strncpy(cpu_info_template.ci_fpu_type.value.c, 2277 cp->cpu_type_info.pi_fputypes, 15); 2278 cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock; 2279 cpu_info_template.ci_chip_id.value.l = 2280 pg_plat_hw_instance_id(cp, PGHW_CHIP); 2281 kstat_named_setstr(&cpu_info_template.ci_implementation, 2282 cp->cpu_idstr); 2283 kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr); 2284 cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp); 2285 cpu_info_template.ci_curr_clock_Hz.value.ui64 = 2286 cp->cpu_curr_clock; 2287 cpu_info_template.ci_pg_id.value.l = 2288 cp->cpu_pg && cp->cpu_pg->cmt_lineage ? 2289 cp->cpu_pg->cmt_lineage->pg_id : -1; 2290 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz, 2291 cp->cpu_supp_freqs); 2292 #if defined(__sparcv9) 2293 cpu_info_template.ci_device_ID.value.ui64 = 2294 cpunodes[cp->cpu_id].device_id; 2295 kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp)); 2296 #endif 2297 #if defined(__x86) 2298 kstat_named_setstr(&cpu_info_template.ci_vendorstr, 2299 cpuid_getvendorstr(cp)); 2300 cpu_info_template.ci_family.value.l = cpuid_getfamily(cp); 2301 cpu_info_template.ci_model.value.l = cpuid_getmodel(cp); 2302 cpu_info_template.ci_step.value.l = cpuid_getstep(cp); 2303 cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp); 2304 cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp); 2305 cpu_info_template.ci_ncoreperchip.value.l = 2306 cpuid_get_ncore_per_chip(cp); 2307 cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp); 2308 cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates; 2309 cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp); 2310 cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp); 2311 kstat_named_setstr(&cpu_info_template.ci_sktstr, 2312 cpuid_getsocketstr(cp)); 2313 #endif 2314 2315 return (0); 2316 } 2317 2318 static void 2319 cpu_info_kstat_create(cpu_t *cp) 2320 { 2321 zoneid_t zoneid; 2322 2323 ASSERT(MUTEX_HELD(&cpu_lock)); 2324 2325 if (pool_pset_enabled()) 2326 zoneid = GLOBAL_ZONEID; 2327 else 2328 zoneid = ALL_ZONES; 2329 if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id, 2330 NULL, "misc", KSTAT_TYPE_NAMED, 2331 sizeof (cpu_info_template) / sizeof (kstat_named_t), 2332 KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) { 2333 cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN; 2334 #if defined(__sparcv9) 2335 cp->cpu_info_kstat->ks_data_size += 2336 strlen(cpu_fru_fmri(cp)) + 1; 2337 #endif 2338 #if defined(__x86) 2339 cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN; 2340 #endif 2341 if (cp->cpu_supp_freqs != NULL) 2342 cp->cpu_info_kstat->ks_data_size += 2343 strlen(cp->cpu_supp_freqs) + 1; 2344 
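		/*
		 * The kstat is virtual and variable-sized: its named data is
		 * the shared cpu_info_template, so the string payloads counted
		 * above (idstr, brandstr, FRU/vendor, supported frequencies)
		 * have to be accounted for in ks_data_size by hand.
		 */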
cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock; 2345 cp->cpu_info_kstat->ks_data = &cpu_info_template; 2346 cp->cpu_info_kstat->ks_private = cp; 2347 cp->cpu_info_kstat->ks_update = cpu_info_kstat_update; 2348 kstat_install(cp->cpu_info_kstat); 2349 } 2350 } 2351 2352 static void 2353 cpu_info_kstat_destroy(cpu_t *cp) 2354 { 2355 ASSERT(MUTEX_HELD(&cpu_lock)); 2356 2357 kstat_delete(cp->cpu_info_kstat); 2358 cp->cpu_info_kstat = NULL; 2359 } 2360 2361 /* 2362 * Create and install kstats for the boot CPU. 2363 */ 2364 void 2365 cpu_kstat_init(cpu_t *cp) 2366 { 2367 mutex_enter(&cpu_lock); 2368 cpu_info_kstat_create(cp); 2369 cpu_stats_kstat_create(cp); 2370 cpu_create_intrstat(cp); 2371 cpu_set_state(cp); 2372 mutex_exit(&cpu_lock); 2373 } 2374 2375 /* 2376 * Make visible to the zone that subset of the cpu information that would be 2377 * initialized when a cpu is configured (but still offline). 2378 */ 2379 void 2380 cpu_visibility_configure(cpu_t *cp, zone_t *zone) 2381 { 2382 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2383 2384 ASSERT(MUTEX_HELD(&cpu_lock)); 2385 ASSERT(pool_pset_enabled()); 2386 ASSERT(cp != NULL); 2387 2388 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2389 zone->zone_ncpus++; 2390 ASSERT(zone->zone_ncpus <= ncpus); 2391 } 2392 if (cp->cpu_info_kstat != NULL) 2393 kstat_zone_add(cp->cpu_info_kstat, zoneid); 2394 } 2395 2396 /* 2397 * Make visible to the zone that subset of the cpu information that would be 2398 * initialized when a previously configured cpu is onlined. 2399 */ 2400 void 2401 cpu_visibility_online(cpu_t *cp, zone_t *zone) 2402 { 2403 kstat_t *ksp; 2404 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2405 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2406 processorid_t cpun; 2407 2408 ASSERT(MUTEX_HELD(&cpu_lock)); 2409 ASSERT(pool_pset_enabled()); 2410 ASSERT(cp != NULL); 2411 ASSERT(cpu_is_active(cp)); 2412 2413 cpun = cp->cpu_id; 2414 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2415 zone->zone_ncpus_online++; 2416 ASSERT(zone->zone_ncpus_online <= ncpus_online); 2417 } 2418 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2419 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2420 != NULL) { 2421 kstat_zone_add(ksp, zoneid); 2422 kstat_rele(ksp); 2423 } 2424 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2425 kstat_zone_add(ksp, zoneid); 2426 kstat_rele(ksp); 2427 } 2428 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2429 kstat_zone_add(ksp, zoneid); 2430 kstat_rele(ksp); 2431 } 2432 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2433 NULL) { 2434 kstat_zone_add(ksp, zoneid); 2435 kstat_rele(ksp); 2436 } 2437 } 2438 2439 /* 2440 * Update relevant kstats such that cpu is now visible to processes 2441 * executing in specified zone. 2442 */ 2443 void 2444 cpu_visibility_add(cpu_t *cp, zone_t *zone) 2445 { 2446 cpu_visibility_configure(cp, zone); 2447 if (cpu_is_active(cp)) 2448 cpu_visibility_online(cp, zone); 2449 } 2450 2451 /* 2452 * Make invisible to the zone that subset of the cpu information that would be 2453 * torn down when a previously offlined cpu is unconfigured. 2454 */ 2455 void 2456 cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone) 2457 { 2458 zoneid_t zoneid = zone ? 
zone->zone_id : ALL_ZONES; 2459 2460 ASSERT(MUTEX_HELD(&cpu_lock)); 2461 ASSERT(pool_pset_enabled()); 2462 ASSERT(cp != NULL); 2463 2464 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2465 ASSERT(zone->zone_ncpus != 0); 2466 zone->zone_ncpus--; 2467 } 2468 if (cp->cpu_info_kstat) 2469 kstat_zone_remove(cp->cpu_info_kstat, zoneid); 2470 } 2471 2472 /* 2473 * Make invisible to the zone that subset of the cpu information that would be 2474 * torn down when a cpu is offlined (but still configured). 2475 */ 2476 void 2477 cpu_visibility_offline(cpu_t *cp, zone_t *zone) 2478 { 2479 kstat_t *ksp; 2480 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */ 2481 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES; 2482 processorid_t cpun; 2483 2484 ASSERT(MUTEX_HELD(&cpu_lock)); 2485 ASSERT(pool_pset_enabled()); 2486 ASSERT(cp != NULL); 2487 ASSERT(cpu_is_active(cp)); 2488 2489 cpun = cp->cpu_id; 2490 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) { 2491 ASSERT(zone->zone_ncpus_online != 0); 2492 zone->zone_ncpus_online--; 2493 } 2494 2495 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) != 2496 NULL) { 2497 kstat_zone_remove(ksp, zoneid); 2498 kstat_rele(ksp); 2499 } 2500 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) { 2501 kstat_zone_remove(ksp, zoneid); 2502 kstat_rele(ksp); 2503 } 2504 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) { 2505 kstat_zone_remove(ksp, zoneid); 2506 kstat_rele(ksp); 2507 } 2508 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun); 2509 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES)) 2510 != NULL) { 2511 kstat_zone_remove(ksp, zoneid); 2512 kstat_rele(ksp); 2513 } 2514 } 2515 2516 /* 2517 * Update relevant kstats such that cpu is no longer visible to processes 2518 * executing in specified zone. 2519 */ 2520 void 2521 cpu_visibility_remove(cpu_t *cp, zone_t *zone) 2522 { 2523 if (cpu_is_active(cp)) 2524 cpu_visibility_offline(cp, zone); 2525 cpu_visibility_unconfigure(cp, zone); 2526 } 2527 2528 /* 2529 * Bind a thread to a CPU as requested. 2530 */ 2531 int 2532 cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind, 2533 int *error) 2534 { 2535 processorid_t binding; 2536 cpu_t *cp = NULL; 2537 2538 ASSERT(MUTEX_HELD(&cpu_lock)); 2539 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock)); 2540 2541 thread_lock(tp); 2542 2543 /* 2544 * Record old binding, but change the obind, which was initialized 2545 * to PBIND_NONE, only if this thread has a binding. This avoids 2546 * reporting PBIND_NONE for a process when some LWPs are bound. 2547 */ 2548 binding = tp->t_bind_cpu; 2549 if (binding != PBIND_NONE) 2550 *obind = binding; /* record old binding */ 2551 2552 switch (bind) { 2553 case PBIND_QUERY: 2554 /* Just return the old binding */ 2555 thread_unlock(tp); 2556 return (0); 2557 2558 case PBIND_QUERY_TYPE: 2559 /* Return the binding type */ 2560 *obind = TB_CPU_IS_SOFT(tp) ? 
PBIND_SOFT : PBIND_HARD; 2561 thread_unlock(tp); 2562 return (0); 2563 2564 case PBIND_SOFT: 2565 /* 2566 * Set soft binding for this thread and return the actual 2567 * binding 2568 */ 2569 TB_CPU_SOFT_SET(tp); 2570 thread_unlock(tp); 2571 return (0); 2572 2573 case PBIND_HARD: 2574 /* 2575 * Set hard binding for this thread and return the actual 2576 * binding 2577 */ 2578 TB_CPU_HARD_SET(tp); 2579 thread_unlock(tp); 2580 return (0); 2581 2582 default: 2583 break; 2584 } 2585 2586 /* 2587 * If this thread/LWP cannot be bound because of permission 2588 * problems, just note that and return success so that the 2589 * other threads/LWPs will be bound. This is the way 2590 * processor_bind() is defined to work. 2591 * 2592 * Binding will get EPERM if the thread is of system class 2593 * or hasprocperm() fails. 2594 */ 2595 if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) { 2596 *error = EPERM; 2597 thread_unlock(tp); 2598 return (0); 2599 } 2600 2601 binding = bind; 2602 if (binding != PBIND_NONE) { 2603 cp = cpu_get((processorid_t)binding); 2604 /* 2605 * Make sure binding is valid and is in right partition. 2606 */ 2607 if (cp == NULL || tp->t_cpupart != cp->cpu_part) { 2608 *error = EINVAL; 2609 thread_unlock(tp); 2610 return (0); 2611 } 2612 } 2613 tp->t_bind_cpu = binding; /* set new binding */ 2614 2615 /* 2616 * If there is no system-set reason for affinity, set 2617 * the t_bound_cpu field to reflect the binding. 2618 */ 2619 if (tp->t_affinitycnt == 0) { 2620 if (binding == PBIND_NONE) { 2621 /* 2622 * We may need to adjust disp_max_unbound_pri 2623 * since we're becoming unbound. 2624 */ 2625 disp_adjust_unbound_pri(tp); 2626 2627 tp->t_bound_cpu = NULL; /* set new binding */ 2628 2629 /* 2630 * Move thread to lgroup with strongest affinity 2631 * after unbinding 2632 */ 2633 if (tp->t_lgrp_affinity) 2634 lgrp_move_thread(tp, 2635 lgrp_choose(tp, tp->t_cpupart), 1); 2636 2637 if (tp->t_state == TS_ONPROC && 2638 tp->t_cpu->cpu_part != tp->t_cpupart) 2639 cpu_surrender(tp); 2640 } else { 2641 lpl_t *lpl; 2642 2643 tp->t_bound_cpu = cp; 2644 ASSERT(cp->cpu_lpl != NULL); 2645 2646 /* 2647 * Set home to lgroup with most affinity containing CPU 2648 * that thread is being bound or minimum bounding 2649 * lgroup if no affinities set 2650 */ 2651 if (tp->t_lgrp_affinity) 2652 lpl = lgrp_affinity_best(tp, tp->t_cpupart, 2653 LGRP_NONE, B_FALSE); 2654 else 2655 lpl = cp->cpu_lpl; 2656 2657 if (tp->t_lpl != lpl) { 2658 /* can't grab cpu_lock */ 2659 lgrp_move_thread(tp, lpl, 1); 2660 } 2661 2662 /* 2663 * Make the thread switch to the bound CPU. 2664 * If the thread is runnable, we need to 2665 * requeue it even if t_cpu is already set 2666 * to the right CPU, since it may be on a 2667 * kpreempt queue and need to move to a local 2668 * queue. We could check t_disp_queue to 2669 * avoid unnecessary overhead if it's already 2670 * on the right queue, but since this isn't 2671 * a performance-critical operation it doesn't 2672 * seem worth the extra code and complexity. 2673 * 2674 * If the thread is weakbound to the cpu then it will 2675 * resist the new binding request until the weak 2676 * binding drops. The cpu_surrender or requeueing 2677 * below could be skipped in such cases (since it 2678 * will have no effect), but that would require 2679 * thread_allowmigrate to acquire thread_lock so 2680 * we'll take the very occasional hit here instead. 
2681 */ 2682 if (tp->t_state == TS_ONPROC) { 2683 cpu_surrender(tp); 2684 } else if (tp->t_state == TS_RUN) { 2685 cpu_t *ocp = tp->t_cpu; 2686 2687 (void) dispdeq(tp); 2688 setbackdq(tp); 2689 /* 2690 * Either on the bound CPU's disp queue now, 2691 * or swapped out or on the swap queue. 2692 */ 2693 ASSERT(tp->t_disp_queue == cp->cpu_disp || 2694 tp->t_weakbound_cpu == ocp || 2695 (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) 2696 != TS_LOAD); 2697 } 2698 } 2699 } 2700 2701 /* 2702 * Our binding has changed; set TP_CHANGEBIND. 2703 */ 2704 tp->t_proc_flag |= TP_CHANGEBIND; 2705 aston(tp); 2706 2707 thread_unlock(tp); 2708 2709 return (0); 2710 } 2711 2712 2713 cpuset_t * 2714 cpuset_alloc(int kmflags) 2715 { 2716 return (kmem_alloc(sizeof (cpuset_t), kmflags)); 2717 } 2718 2719 void 2720 cpuset_free(cpuset_t *s) 2721 { 2722 kmem_free(s, sizeof (cpuset_t)); 2723 } 2724 2725 void 2726 cpuset_all(cpuset_t *s) 2727 { 2728 int i; 2729 2730 for (i = 0; i < CPUSET_WORDS; i++) 2731 s->cpub[i] = ~0UL; 2732 } 2733 2734 void 2735 cpuset_all_but(cpuset_t *s, const uint_t cpu) 2736 { 2737 cpuset_all(s); 2738 CPUSET_DEL(*s, cpu); 2739 } 2740 2741 void 2742 cpuset_only(cpuset_t *s, const uint_t cpu) 2743 { 2744 CPUSET_ZERO(*s); 2745 CPUSET_ADD(*s, cpu); 2746 } 2747 2748 long 2749 cpu_in_set(const cpuset_t *s, const uint_t cpu) 2750 { 2751 VERIFY(cpu < NCPU); 2752 return (BT_TEST(s->cpub, cpu)); 2753 } 2754 2755 void 2756 cpuset_add(cpuset_t *s, const uint_t cpu) 2757 { 2758 VERIFY(cpu < NCPU); 2759 BT_SET(s->cpub, cpu); 2760 } 2761 2762 void 2763 cpuset_del(cpuset_t *s, const uint_t cpu) 2764 { 2765 VERIFY(cpu < NCPU); 2766 BT_CLEAR(s->cpub, cpu); 2767 } 2768 2769 int 2770 cpuset_isnull(const cpuset_t *s) 2771 { 2772 int i; 2773 2774 for (i = 0; i < CPUSET_WORDS; i++) { 2775 if (s->cpub[i] != 0) 2776 return (0); 2777 } 2778 return (1); 2779 } 2780 2781 int 2782 cpuset_isequal(const cpuset_t *s1, const cpuset_t *s2) 2783 { 2784 int i; 2785 2786 for (i = 0; i < CPUSET_WORDS; i++) { 2787 if (s1->cpub[i] != s2->cpub[i]) 2788 return (0); 2789 } 2790 return (1); 2791 } 2792 2793 uint_t 2794 cpuset_find(const cpuset_t *s) 2795 { 2796 2797 uint_t i; 2798 uint_t cpu = (uint_t)-1; 2799 2800 /* 2801 * Find a cpu in the cpuset 2802 */ 2803 for (i = 0; i < CPUSET_WORDS; i++) { 2804 cpu = (uint_t)(lowbit(s->cpub[i]) - 1); 2805 if (cpu != (uint_t)-1) { 2806 cpu += i * BT_NBIPUL; 2807 break; 2808 } 2809 } 2810 return (cpu); 2811 } 2812 2813 void 2814 cpuset_bounds(const cpuset_t *s, uint_t *smallestid, uint_t *largestid) 2815 { 2816 int i, j; 2817 uint_t bit; 2818 2819 /* 2820 * First, find the smallest cpu id in the set. 2821 */ 2822 for (i = 0; i < CPUSET_WORDS; i++) { 2823 if (s->cpub[i] != 0) { 2824 bit = (uint_t)(lowbit(s->cpub[i]) - 1); 2825 ASSERT(bit != (uint_t)-1); 2826 *smallestid = bit + (i * BT_NBIPUL); 2827 2828 /* 2829 * Now find the largest cpu id in 2830 * the set and return immediately. 2831 * Done in an inner loop to avoid 2832 * having to break out of the first 2833 * loop. 2834 */ 2835 for (j = CPUSET_WORDS - 1; j >= i; j--) { 2836 if (s->cpub[j] != 0) { 2837 bit = (uint_t)(highbit(s->cpub[j]) - 1); 2838 ASSERT(bit != (uint_t)-1); 2839 *largestid = bit + (j * BT_NBIPUL); 2840 ASSERT(*largestid >= *smallestid); 2841 return; 2842 } 2843 } 2844 2845 /* 2846 * If this code is reached, a 2847 * smallestid was found, but not a 2848 * largestid. The cpuset must have 2849 * been changed during the course 2850 * of this function call. 
2851 */ 2852 ASSERT(0); 2853 } 2854 } 2855 *smallestid = *largestid = CPUSET_NOTINSET; 2856 } 2857 2858 void 2859 cpuset_atomic_del(cpuset_t *s, const uint_t cpu) 2860 { 2861 VERIFY(cpu < NCPU); 2862 BT_ATOMIC_CLEAR(s->cpub, (cpu)) 2863 } 2864 2865 void 2866 cpuset_atomic_add(cpuset_t *s, const uint_t cpu) 2867 { 2868 VERIFY(cpu < NCPU); 2869 BT_ATOMIC_SET(s->cpub, (cpu)) 2870 } 2871 2872 long 2873 cpuset_atomic_xadd(cpuset_t *s, const uint_t cpu) 2874 { 2875 long res; 2876 2877 VERIFY(cpu < NCPU); 2878 BT_ATOMIC_SET_EXCL(s->cpub, cpu, res); 2879 return (res); 2880 } 2881 2882 long 2883 cpuset_atomic_xdel(cpuset_t *s, const uint_t cpu) 2884 { 2885 long res; 2886 2887 VERIFY(cpu < NCPU); 2888 BT_ATOMIC_CLEAR_EXCL(s->cpub, cpu, res); 2889 return (res); 2890 } 2891 2892 void 2893 cpuset_or(cpuset_t *dst, cpuset_t *src) 2894 { 2895 for (int i = 0; i < CPUSET_WORDS; i++) { 2896 dst->cpub[i] |= src->cpub[i]; 2897 } 2898 } 2899 2900 void 2901 cpuset_xor(cpuset_t *dst, cpuset_t *src) 2902 { 2903 for (int i = 0; i < CPUSET_WORDS; i++) { 2904 dst->cpub[i] ^= src->cpub[i]; 2905 } 2906 } 2907 2908 void 2909 cpuset_and(cpuset_t *dst, cpuset_t *src) 2910 { 2911 for (int i = 0; i < CPUSET_WORDS; i++) { 2912 dst->cpub[i] &= src->cpub[i]; 2913 } 2914 } 2915 2916 void 2917 cpuset_zero(cpuset_t *dst) 2918 { 2919 for (int i = 0; i < CPUSET_WORDS; i++) { 2920 dst->cpub[i] = 0; 2921 } 2922 } 2923 2924 2925 /* 2926 * Unbind threads bound to specified CPU. 2927 * 2928 * If `unbind_all_threads' is true, unbind all user threads bound to a given 2929 * CPU. Otherwise unbind all soft-bound user threads. 2930 */ 2931 int 2932 cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads) 2933 { 2934 processorid_t obind; 2935 kthread_t *tp; 2936 int ret = 0; 2937 proc_t *pp; 2938 int err, berr = 0; 2939 2940 ASSERT(MUTEX_HELD(&cpu_lock)); 2941 2942 mutex_enter(&pidlock); 2943 for (pp = practive; pp != NULL; pp = pp->p_next) { 2944 mutex_enter(&pp->p_lock); 2945 tp = pp->p_tlist; 2946 /* 2947 * Skip zombies, kernel processes, and processes in 2948 * other zones, if called from a non-global zone. 2949 */ 2950 if (tp == NULL || (pp->p_flag & SSYS) || 2951 !HASZONEACCESS(curproc, pp->p_zone->zone_id)) { 2952 mutex_exit(&pp->p_lock); 2953 continue; 2954 } 2955 do { 2956 if (tp->t_bind_cpu != cpu) 2957 continue; 2958 /* 2959 * Skip threads with hard binding when 2960 * `unbind_all_threads' is not specified. 2961 */ 2962 if (!unbind_all_threads && TB_CPU_IS_HARD(tp)) 2963 continue; 2964 err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr); 2965 if (ret == 0) 2966 ret = err; 2967 } while ((tp = tp->t_forw) != pp->p_tlist); 2968 mutex_exit(&pp->p_lock); 2969 } 2970 mutex_exit(&pidlock); 2971 if (ret == 0) 2972 ret = berr; 2973 return (ret); 2974 } 2975 2976 2977 /* 2978 * Destroy all remaining bound threads on a cpu. 2979 */ 2980 void 2981 cpu_destroy_bound_threads(cpu_t *cp) 2982 { 2983 extern id_t syscid; 2984 register kthread_id_t t, tlist, tnext; 2985 2986 /* 2987 * Destroy all remaining bound threads on the cpu. This 2988 * should include both the interrupt threads and the idle thread. 2989 * This requires some care, since we need to traverse the 2990 * thread list with the pidlock mutex locked, but thread_free 2991 * also locks the pidlock mutex. So, we collect the threads 2992 * we're going to reap in a list headed by "tlist", then we 2993 * unlock the pidlock mutex and traverse the tlist list, 2994 * calling thread_free() on the threads. Simple, n'est-ce pas?
2995 * Also, this depends on thread_free not mucking with the 2996 * t_next and t_prev links of the thread. 2997 */ 2998 2999 if ((t = curthread) != NULL) { 3000 3001 tlist = NULL; 3002 mutex_enter(&pidlock); 3003 do { 3004 tnext = t->t_next; 3005 if (t->t_bound_cpu == cp) { 3006 3007 /* 3008 * We've found a bound thread, carefully unlink 3009 * it out of the thread list, and add it to 3010 * our "tlist". We "know" we don't have to 3011 * worry about unlinking curthread (the thread 3012 * that is executing this code). 3013 */ 3014 t->t_next->t_prev = t->t_prev; 3015 t->t_prev->t_next = t->t_next; 3016 t->t_next = tlist; 3017 tlist = t; 3018 ASSERT(t->t_cid == syscid); 3019 /* wake up anyone blocked in thread_join */ 3020 cv_broadcast(&t->t_joincv); 3021 /* 3022 * t_lwp set by interrupt threads and not 3023 * cleared. 3024 */ 3025 t->t_lwp = NULL; 3026 /* 3027 * Pause and idle threads always have 3028 * t_state set to TS_ONPROC. 3029 */ 3030 t->t_state = TS_FREE; 3031 t->t_prev = NULL; /* Just in case */ 3032 } 3033 3034 } while ((t = tnext) != curthread); 3035 3036 mutex_exit(&pidlock); 3037 3038 mutex_sync(); 3039 for (t = tlist; t != NULL; t = tnext) { 3040 tnext = t->t_next; 3041 thread_free(t); 3042 } 3043 } 3044 } 3045 3046 /* 3047 * Update the cpu_supp_freqs of this cpu. This information is returned 3048 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then 3049 * maintain the kstat data size. 3050 */ 3051 void 3052 cpu_set_supp_freqs(cpu_t *cp, const char *freqs) 3053 { 3054 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */ 3055 const char *lfreqs = clkstr; 3056 boolean_t kstat_exists = B_FALSE; 3057 kstat_t *ksp; 3058 size_t len; 3059 3060 /* 3061 * A NULL pointer means we only support one speed. 3062 */ 3063 if (freqs == NULL) 3064 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64, 3065 cp->cpu_curr_clock); 3066 else 3067 lfreqs = freqs; 3068 3069 /* 3070 * Make sure the frequency doesn't change while a snapshot is 3071 * going on. Of course, we only need to worry about this if 3072 * the kstat exists. 3073 */ 3074 if ((ksp = cp->cpu_info_kstat) != NULL) { 3075 mutex_enter(ksp->ks_lock); 3076 kstat_exists = B_TRUE; 3077 } 3078 3079 /* 3080 * Free any previously allocated string and, if the kstat 3081 * already exists, update its data size. 3082 */ 3083 if (cp->cpu_supp_freqs != NULL) { 3084 len = strlen(cp->cpu_supp_freqs) + 1; 3085 kmem_free(cp->cpu_supp_freqs, len); 3086 if (kstat_exists) 3087 ksp->ks_data_size -= len; 3088 } 3089 3090 /* 3091 * Allocate the new string and set the pointer. 3092 */ 3093 len = strlen(lfreqs) + 1; 3094 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP); 3095 (void) strcpy(cp->cpu_supp_freqs, lfreqs); 3096 3097 /* 3098 * If the kstat already exists, update the data size and 3099 * release the lock. 3100 */ 3101 if (kstat_exists) { 3102 ksp->ks_data_size += len; 3103 mutex_exit(ksp->ks_lock); 3104 } 3105 } 3106 3107 /* 3108 * Indicate the current CPU's clock frequency (in Hz). 3109 * The calling context must be such that CPU references are safe.
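 * (Typically this means the caller is executing on the CPU in question
 * with preemption disabled, so that the CPU macro below refers to a
 * stable cpu_t for the duration of the update.)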
3110 */ 3111 void 3112 cpu_set_curr_clock(uint64_t new_clk) 3113 { 3114 uint64_t old_clk; 3115 3116 old_clk = CPU->cpu_curr_clock; 3117 CPU->cpu_curr_clock = new_clk; 3118 3119 /* 3120 * The cpu-change-speed DTrace probe exports the frequency in Hz 3121 */ 3122 DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id, 3123 uint64_t, old_clk, uint64_t, new_clk); 3124 } 3125 3126 /* 3127 * processor_info(2) and p_online(2) status support functions 3128 * The constants returned by the cpu_get_state() and cpu_get_state_str() are 3129 * for use in communicating processor state information to userland. Kernel 3130 * subsystems should only be using the cpu_flags value directly. Subsystems 3131 * modifying cpu_flags should record the state change via a call to the 3132 * cpu_set_state(). 3133 */ 3134 3135 /* 3136 * Update the pi_state of this CPU. This function provides the CPU status for 3137 * the information returned by processor_info(2). 3138 */ 3139 void 3140 cpu_set_state(cpu_t *cpu) 3141 { 3142 ASSERT(MUTEX_HELD(&cpu_lock)); 3143 cpu->cpu_type_info.pi_state = cpu_get_state(cpu); 3144 cpu->cpu_state_begin = gethrestime_sec(); 3145 pool_cpu_mod = gethrtime(); 3146 } 3147 3148 /* 3149 * Return offline/online/other status for the indicated CPU. Use only for 3150 * communication with user applications; cpu_flags provides the in-kernel 3151 * interface. 3152 */ 3153 int 3154 cpu_get_state(cpu_t *cpu) 3155 { 3156 ASSERT(MUTEX_HELD(&cpu_lock)); 3157 if (cpu->cpu_flags & CPU_POWEROFF) 3158 return (P_POWEROFF); 3159 else if (cpu->cpu_flags & CPU_FAULTED) 3160 return (P_FAULTED); 3161 else if (cpu->cpu_flags & CPU_SPARE) 3162 return (P_SPARE); 3163 else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY) 3164 return (P_OFFLINE); 3165 else if (cpu->cpu_flags & CPU_ENABLE) 3166 return (P_ONLINE); 3167 else 3168 return (P_NOINTR); 3169 } 3170 3171 /* 3172 * Return processor_info(2) state as a string. 3173 */ 3174 const char * 3175 cpu_get_state_str(cpu_t *cpu) 3176 { 3177 const char *string; 3178 3179 switch (cpu_get_state(cpu)) { 3180 case P_ONLINE: 3181 string = PS_ONLINE; 3182 break; 3183 case P_POWEROFF: 3184 string = PS_POWEROFF; 3185 break; 3186 case P_NOINTR: 3187 string = PS_NOINTR; 3188 break; 3189 case P_SPARE: 3190 string = PS_SPARE; 3191 break; 3192 case P_FAULTED: 3193 string = PS_FAULTED; 3194 break; 3195 case P_OFFLINE: 3196 string = PS_OFFLINE; 3197 break; 3198 default: 3199 string = "unknown"; 3200 break; 3201 } 3202 return (string); 3203 } 3204 3205 /* 3206 * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named 3207 * kstats, respectively. This is done when a CPU is initialized or placed 3208 * online via p_online(2). 
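 * The named kstats created here are cpu:<id>:sys and cpu:<id>:vm; the
 * familiar raw cpu_stat_t data is exported as cpu_stat:<id>:cpu_stat<id>.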
3209 */ 3210 static void 3211 cpu_stats_kstat_create(cpu_t *cp) 3212 { 3213 int instance = cp->cpu_id; 3214 char *module = "cpu"; 3215 char *class = "misc"; 3216 kstat_t *ksp; 3217 zoneid_t zoneid; 3218 3219 ASSERT(MUTEX_HELD(&cpu_lock)); 3220 3221 if (pool_pset_enabled()) 3222 zoneid = GLOBAL_ZONEID; 3223 else 3224 zoneid = ALL_ZONES; 3225 /* 3226 * Create named kstats 3227 */ 3228 #define CPU_STATS_KS_CREATE(name, tsize, update_func) \ 3229 ksp = kstat_create_zone(module, instance, (name), class, \ 3230 KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \ 3231 zoneid); \ 3232 if (ksp != NULL) { \ 3233 ksp->ks_private = cp; \ 3234 ksp->ks_update = (update_func); \ 3235 kstat_install(ksp); \ 3236 } else \ 3237 cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \ 3238 module, instance, (name)); 3239 3240 CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template), 3241 cpu_sys_stats_ks_update); 3242 CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template), 3243 cpu_vm_stats_ks_update); 3244 3245 /* 3246 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat. 3247 */ 3248 ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL, 3249 "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid); 3250 if (ksp != NULL) { 3251 ksp->ks_update = cpu_stat_ks_update; 3252 ksp->ks_private = cp; 3253 kstat_install(ksp); 3254 } 3255 } 3256 3257 static void 3258 cpu_stats_kstat_destroy(cpu_t *cp) 3259 { 3260 char ks_name[KSTAT_STRLEN]; 3261 3262 (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id); 3263 kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name); 3264 3265 kstat_delete_byname("cpu", cp->cpu_id, "sys"); 3266 kstat_delete_byname("cpu", cp->cpu_id, "vm"); 3267 } 3268 3269 static int 3270 cpu_sys_stats_ks_update(kstat_t *ksp, int rw) 3271 { 3272 cpu_t *cp = (cpu_t *)ksp->ks_private; 3273 struct cpu_sys_stats_ks_data *csskd; 3274 cpu_sys_stats_t *css; 3275 hrtime_t msnsecs[NCMSTATES]; 3276 int i; 3277 3278 if (rw == KSTAT_WRITE) 3279 return (EACCES); 3280 3281 csskd = ksp->ks_data; 3282 css = &cp->cpu_stats.sys; 3283 3284 /* 3285 * Read CPU mstate, but compare with the last values we 3286 * received to make sure that the returned kstats never 3287 * decrease. 
3288 */ 3289 3290 get_cpu_mstate(cp, msnsecs); 3291 if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE]) 3292 msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64; 3293 if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER]) 3294 msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64; 3295 if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM]) 3296 msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64; 3297 3298 bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data, 3299 sizeof (cpu_sys_stats_ks_data_template)); 3300 3301 csskd->cpu_ticks_wait.value.ui64 = 0; 3302 csskd->wait_ticks_io.value.ui64 = 0; 3303 3304 csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE]; 3305 csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER]; 3306 csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM]; 3307 csskd->cpu_ticks_idle.value.ui64 = 3308 NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64); 3309 csskd->cpu_ticks_user.value.ui64 = 3310 NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64); 3311 csskd->cpu_ticks_kernel.value.ui64 = 3312 NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64); 3313 csskd->cpu_nsec_dtrace.value.ui64 = cp->cpu_dtrace_nsec; 3314 csskd->dtrace_probes.value.ui64 = cp->cpu_dtrace_probes; 3315 csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast; 3316 csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload; 3317 csskd->bread.value.ui64 = css->bread; 3318 csskd->bwrite.value.ui64 = css->bwrite; 3319 csskd->lread.value.ui64 = css->lread; 3320 csskd->lwrite.value.ui64 = css->lwrite; 3321 csskd->phread.value.ui64 = css->phread; 3322 csskd->phwrite.value.ui64 = css->phwrite; 3323 csskd->pswitch.value.ui64 = css->pswitch; 3324 csskd->trap.value.ui64 = css->trap; 3325 csskd->intr.value.ui64 = 0; 3326 for (i = 0; i < PIL_MAX; i++) 3327 csskd->intr.value.ui64 += css->intr[i]; 3328 csskd->syscall.value.ui64 = css->syscall; 3329 csskd->sysread.value.ui64 = css->sysread; 3330 csskd->syswrite.value.ui64 = css->syswrite; 3331 csskd->sysfork.value.ui64 = css->sysfork; 3332 csskd->sysvfork.value.ui64 = css->sysvfork; 3333 csskd->sysexec.value.ui64 = css->sysexec; 3334 csskd->readch.value.ui64 = css->readch; 3335 csskd->writech.value.ui64 = css->writech; 3336 csskd->rcvint.value.ui64 = css->rcvint; 3337 csskd->xmtint.value.ui64 = css->xmtint; 3338 csskd->mdmint.value.ui64 = css->mdmint; 3339 csskd->rawch.value.ui64 = css->rawch; 3340 csskd->canch.value.ui64 = css->canch; 3341 csskd->outch.value.ui64 = css->outch; 3342 csskd->msg.value.ui64 = css->msg; 3343 csskd->sema.value.ui64 = css->sema; 3344 csskd->namei.value.ui64 = css->namei; 3345 csskd->ufsiget.value.ui64 = css->ufsiget; 3346 csskd->ufsdirblk.value.ui64 = css->ufsdirblk; 3347 csskd->ufsipage.value.ui64 = css->ufsipage; 3348 csskd->ufsinopage.value.ui64 = css->ufsinopage; 3349 csskd->procovf.value.ui64 = css->procovf; 3350 csskd->intrthread.value.ui64 = 0; 3351 for (i = 0; i < LOCK_LEVEL - 1; i++) 3352 csskd->intrthread.value.ui64 += css->intr[i]; 3353 csskd->intrblk.value.ui64 = css->intrblk; 3354 csskd->intrunpin.value.ui64 = css->intrunpin; 3355 csskd->idlethread.value.ui64 = css->idlethread; 3356 csskd->inv_swtch.value.ui64 = css->inv_swtch; 3357 csskd->nthreads.value.ui64 = css->nthreads; 3358 csskd->cpumigrate.value.ui64 = css->cpumigrate; 3359 csskd->xcalls.value.ui64 = css->xcalls; 3360 csskd->mutex_adenters.value.ui64 = css->mutex_adenters; 3361 csskd->rw_rdfails.value.ui64 = css->rw_rdfails; 3362 csskd->rw_wrfails.value.ui64 = css->rw_wrfails; 3363 csskd->modload.value.ui64 = css->modload; 3364 csskd->modunload.value.ui64 = css->modunload; 3365 
csskd->bawrite.value.ui64 = css->bawrite; 3366 csskd->iowait.value.ui64 = css->iowait; 3367 3368 return (0); 3369 } 3370 3371 static int 3372 cpu_vm_stats_ks_update(kstat_t *ksp, int rw) 3373 { 3374 cpu_t *cp = (cpu_t *)ksp->ks_private; 3375 struct cpu_vm_stats_ks_data *cvskd; 3376 cpu_vm_stats_t *cvs; 3377 3378 if (rw == KSTAT_WRITE) 3379 return (EACCES); 3380 3381 cvs = &cp->cpu_stats.vm; 3382 cvskd = ksp->ks_data; 3383 3384 bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data, 3385 sizeof (cpu_vm_stats_ks_data_template)); 3386 cvskd->pgrec.value.ui64 = cvs->pgrec; 3387 cvskd->pgfrec.value.ui64 = cvs->pgfrec; 3388 cvskd->pgin.value.ui64 = cvs->pgin; 3389 cvskd->pgpgin.value.ui64 = cvs->pgpgin; 3390 cvskd->pgout.value.ui64 = cvs->pgout; 3391 cvskd->pgpgout.value.ui64 = cvs->pgpgout; 3392 cvskd->swapin.value.ui64 = cvs->swapin; 3393 cvskd->pgswapin.value.ui64 = cvs->pgswapin; 3394 cvskd->swapout.value.ui64 = cvs->swapout; 3395 cvskd->pgswapout.value.ui64 = cvs->pgswapout; 3396 cvskd->zfod.value.ui64 = cvs->zfod; 3397 cvskd->dfree.value.ui64 = cvs->dfree; 3398 cvskd->scan.value.ui64 = cvs->scan; 3399 cvskd->rev.value.ui64 = cvs->rev; 3400 cvskd->hat_fault.value.ui64 = cvs->hat_fault; 3401 cvskd->as_fault.value.ui64 = cvs->as_fault; 3402 cvskd->maj_fault.value.ui64 = cvs->maj_fault; 3403 cvskd->cow_fault.value.ui64 = cvs->cow_fault; 3404 cvskd->prot_fault.value.ui64 = cvs->prot_fault; 3405 cvskd->softlock.value.ui64 = cvs->softlock; 3406 cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt; 3407 cvskd->pgrrun.value.ui64 = cvs->pgrrun; 3408 cvskd->execpgin.value.ui64 = cvs->execpgin; 3409 cvskd->execpgout.value.ui64 = cvs->execpgout; 3410 cvskd->execfree.value.ui64 = cvs->execfree; 3411 cvskd->anonpgin.value.ui64 = cvs->anonpgin; 3412 cvskd->anonpgout.value.ui64 = cvs->anonpgout; 3413 cvskd->anonfree.value.ui64 = cvs->anonfree; 3414 cvskd->fspgin.value.ui64 = cvs->fspgin; 3415 cvskd->fspgout.value.ui64 = cvs->fspgout; 3416 cvskd->fsfree.value.ui64 = cvs->fsfree; 3417 3418 return (0); 3419 } 3420 3421 static int 3422 cpu_stat_ks_update(kstat_t *ksp, int rw) 3423 { 3424 cpu_stat_t *cso; 3425 cpu_t *cp; 3426 int i; 3427 hrtime_t msnsecs[NCMSTATES]; 3428 3429 cso = (cpu_stat_t *)ksp->ks_data; 3430 cp = (cpu_t *)ksp->ks_private; 3431 3432 if (rw == KSTAT_WRITE) 3433 return (EACCES); 3434 3435 /* 3436 * Read CPU mstate, but compare with the last values we 3437 * received to make sure that the returned kstats never 3438 * decrease. 
3439 */ 3440 3441 get_cpu_mstate(cp, msnsecs); 3442 msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]); 3443 msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]); 3444 msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]); 3445 if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE]) 3446 cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE]; 3447 if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER]) 3448 cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER]; 3449 if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM]) 3450 cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM]; 3451 cso->cpu_sysinfo.cpu[CPU_WAIT] = 0; 3452 cso->cpu_sysinfo.wait[W_IO] = 0; 3453 cso->cpu_sysinfo.wait[W_SWAP] = 0; 3454 cso->cpu_sysinfo.wait[W_PIO] = 0; 3455 cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread); 3456 cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite); 3457 cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread); 3458 cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite); 3459 cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread); 3460 cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite); 3461 cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch); 3462 cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap); 3463 cso->cpu_sysinfo.intr = 0; 3464 for (i = 0; i < PIL_MAX; i++) 3465 cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]); 3466 cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall); 3467 cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread); 3468 cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite); 3469 cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork); 3470 cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork); 3471 cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec); 3472 cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch); 3473 cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech); 3474 cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint); 3475 cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint); 3476 cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint); 3477 cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch); 3478 cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch); 3479 cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch); 3480 cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg); 3481 cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema); 3482 cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei); 3483 cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget); 3484 cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk); 3485 cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage); 3486 cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage); 3487 cso->cpu_sysinfo.inodeovf = 0; 3488 cso->cpu_sysinfo.fileovf = 0; 3489 cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf); 3490 cso->cpu_sysinfo.intrthread = 0; 3491 for (i = 0; i < LOCK_LEVEL - 1; i++) 3492 cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]); 3493 cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk); 3494 cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread); 3495 cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch); 3496 cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads); 3497 cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate); 3498 cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls); 3499 cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters); 3500 cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails); 3501 cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails); 3502 cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload); 3503 cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload); 3504 
cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite); 3505 cso->cpu_sysinfo.rw_enters = 0; 3506 cso->cpu_sysinfo.win_uo_cnt = 0; 3507 cso->cpu_sysinfo.win_uu_cnt = 0; 3508 cso->cpu_sysinfo.win_so_cnt = 0; 3509 cso->cpu_sysinfo.win_su_cnt = 0; 3510 cso->cpu_sysinfo.win_suo_cnt = 0; 3511 3512 cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait); 3513 cso->cpu_syswait.swap = 0; 3514 cso->cpu_syswait.physio = 0; 3515 3516 cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec); 3517 cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec); 3518 cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin); 3519 cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin); 3520 cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout); 3521 cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout); 3522 cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin); 3523 cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin); 3524 cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout); 3525 cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout); 3526 cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod); 3527 cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree); 3528 cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan); 3529 cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev); 3530 cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault); 3531 cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault); 3532 cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault); 3533 cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault); 3534 cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault); 3535 cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock); 3536 cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt); 3537 cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun); 3538 cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin); 3539 cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout); 3540 cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree); 3541 cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin); 3542 cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout); 3543 cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree); 3544 cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin); 3545 cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout); 3546 cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree); 3547 3548 return (0); 3549 } 3550