17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5100b72f4Sandrei * Common Development and Distribution License (the "License"). 6100b72f4Sandrei * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 21ae115bc7Smrj 227c478bd9Sstevel@tonic-gate /* 23fb2f18f8Sesaxe * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 247c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
257c478bd9Sstevel@tonic-gate */ 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 287c478bd9Sstevel@tonic-gate 297c478bd9Sstevel@tonic-gate #include <sys/types.h> 307c478bd9Sstevel@tonic-gate #include <sys/thread.h> 317c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 327c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> 337c478bd9Sstevel@tonic-gate #include <sys/param.h> 347c478bd9Sstevel@tonic-gate #include <sys/proc.h> 357c478bd9Sstevel@tonic-gate #include <sys/disp.h> 367c478bd9Sstevel@tonic-gate #include <sys/class.h> 377c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 387c478bd9Sstevel@tonic-gate #include <sys/debug.h> 397c478bd9Sstevel@tonic-gate #include <sys/asm_linkage.h> 407c478bd9Sstevel@tonic-gate #include <sys/x_call.h> 417c478bd9Sstevel@tonic-gate #include <sys/systm.h> 427c478bd9Sstevel@tonic-gate #include <sys/var.h> 437c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 447c478bd9Sstevel@tonic-gate #include <vm/hat.h> 457c478bd9Sstevel@tonic-gate #include <vm/as.h> 467c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h> 47ae115bc7Smrj #include <vm/seg_kp.h> 487c478bd9Sstevel@tonic-gate #include <sys/segments.h> 497c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 507c478bd9Sstevel@tonic-gate #include <sys/stack.h> 517c478bd9Sstevel@tonic-gate #include <sys/smp_impldefs.h> 527c478bd9Sstevel@tonic-gate #include <sys/x86_archext.h> 537c478bd9Sstevel@tonic-gate #include <sys/machsystm.h> 547c478bd9Sstevel@tonic-gate #include <sys/traptrace.h> 557c478bd9Sstevel@tonic-gate #include <sys/clock.h> 567c478bd9Sstevel@tonic-gate #include <sys/cpc_impl.h> 57fb2f18f8Sesaxe #include <sys/pg.h> 58fb2f18f8Sesaxe #include <sys/cmt.h> 597c478bd9Sstevel@tonic-gate #include <sys/dtrace.h> 607c478bd9Sstevel@tonic-gate #include <sys/archsystm.h> 617c478bd9Sstevel@tonic-gate #include <sys/fp.h> 627c478bd9Sstevel@tonic-gate #include <sys/reboot.h> 63ae115bc7Smrj #include <sys/kdi_machimpl.h> 647c478bd9Sstevel@tonic-gate #include 
<vm/hat_i86.h> 657c478bd9Sstevel@tonic-gate #include <sys/memnode.h> 66ef50d8c0Sesaxe #include <sys/pci_cfgspace.h> 67ae115bc7Smrj #include <sys/mach_mmu.h> 68ae115bc7Smrj #include <sys/sysmacros.h> 697aec1d6eScindi #include <sys/cpu_module.h> 707c478bd9Sstevel@tonic-gate 717c478bd9Sstevel@tonic-gate struct cpu cpus[1]; /* CPU data */ 727c478bd9Sstevel@tonic-gate struct cpu *cpu[NCPU] = {&cpus[0]}; /* pointers to all CPUs */ 737c478bd9Sstevel@tonic-gate cpu_core_t cpu_core[NCPU]; /* cpu_core structures */ 747c478bd9Sstevel@tonic-gate 757c478bd9Sstevel@tonic-gate /* 76ae115bc7Smrj * Useful for disabling MP bring-up on a MP capable system. 777c478bd9Sstevel@tonic-gate */ 787c478bd9Sstevel@tonic-gate int use_mp = 1; 797c478bd9Sstevel@tonic-gate 8041791439Sandrei /* 81ae115bc7Smrj * to be set by a PSM to indicate what cpus 82ae115bc7Smrj * are sitting around on the system. 8341791439Sandrei */ 84ae115bc7Smrj cpuset_t mp_cpus; 857c478bd9Sstevel@tonic-gate 867c478bd9Sstevel@tonic-gate /* 877c478bd9Sstevel@tonic-gate * This variable is used by the hat layer to decide whether or not 887c478bd9Sstevel@tonic-gate * critical sections are needed to prevent race conditions. For sun4m, 897c478bd9Sstevel@tonic-gate * this variable is set once enough MP initialization has been done in 907c478bd9Sstevel@tonic-gate * order to allow cross calls. 
 */
int flushes_require_xcalls;

/* Set of CPUs that have completed startup and may receive cross calls. */
cpuset_t cpu_ready_set = 1;

static void mp_startup(void);

static void cpu_sep_enable(void);
static void cpu_sep_disable(void);
static void cpu_asysc_enable(void);
static void cpu_asysc_disable(void);

extern int tsc_gethrtime_enable;

/*
 * Init CPU info - get CPU type info for processor_info system call.
 *
 * Fills in cp->cpu_type_info (clock rate, processor and FPU type strings)
 * and allocates/records the cpuid identification and brand strings for
 * the given CPU.  Allocations use KM_SLEEP, so this must be called from
 * a context that may block.
 */
void
init_cpu_info(struct cpu *cp)
{
	processor_info_t *pi = &cp->cpu_type_info;
	char buf[CPU_IDSTRLEN];

	/*
	 * Get clock-frequency property for the CPU.
	 */
	pi->pi_clock = cpu_freq;

	/*
	 * Current frequency in Hz.
	 */
	cp->cpu_curr_clock = cpu_freq_hz;

	(void) strcpy(pi->pi_processor_type, "i386");
	if (fpu_exists)
		(void) strcpy(pi->pi_fputypes, "i387 compatible");

	/* Identification string, e.g. vendor/family/model/step. */
	(void) cpuid_getidstr(cp, buf, sizeof (buf));

	cp->cpu_idstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
	(void) strcpy(cp->cpu_idstr, buf);

	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);

	/* Marketing brand string as reported by cpuid. */
	(void) cpuid_getbrandstr(cp, buf, sizeof (buf));
	cp->cpu_brandstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
	(void) strcpy(cp->cpu_brandstr, buf);

	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
}

/*
 * Configure syscall support on this CPU.
 *
 * Enables the fast system call mechanisms the hardware advertises:
 * syscall/sysret (64-bit kernels, X86_ASYSC) and sysenter/sysexit
 * (X86_SEP), programming the associated MSRs.  Runs with kernel
 * preemption disabled so the MSR writes apply to the current CPU.
 */
/*ARGSUSED*/
static void
init_cpu_syscall(struct cpu *cp)
{
	kpreempt_disable();

#if defined(__amd64)
	if ((x86_feature & (X86_MSR | X86_ASYSC)) == (X86_MSR | X86_ASYSC)) {

#if !defined(__lint)
		/*
		 * The syscall instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here.
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);
		ASSERT(UDS_SEL == U32CS_SEL + 8);
		ASSERT(UCS_SEL == U32CS_SEL + 16);
#endif
		/*
		 * Turn syscall/sysret extensions on.
		 */
		cpu_asysc_enable();

		/*
		 * Program the magic registers ..
		 */
		wrmsr(MSR_AMD_STAR,
		    ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32);
		wrmsr(MSR_AMD_LSTAR, (uint64_t)(uintptr_t)sys_syscall);
		wrmsr(MSR_AMD_CSTAR, (uint64_t)(uintptr_t)sys_syscall32);

		/*
		 * This list of flags is masked off the incoming
		 * %rfl when we enter the kernel.
		 */
		wrmsr(MSR_AMD_SFMASK, (uint64_t)(uintptr_t)(PS_IE | PS_T));
	}
#endif

	/*
	 * On 32-bit kernels, we use sysenter/sysexit because it's too
	 * hard to use syscall/sysret, and it is more portable anyway.
	 *
	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
	 * variant isn't available to 32-bit applications, but sysenter is.
	 */
	if ((x86_feature & (X86_MSR | X86_SEP)) == (X86_MSR | X86_SEP)) {

#if !defined(__lint)
		/*
		 * The sysenter instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here. See "sysenter" in Intel document 245471-012, "IA-32
		 * Intel Architecture Software Developer's Manual Volume 2:
		 * Instruction Set Reference"
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);

		ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT32(UDS_SEL == UCS_SEL + 8);

		ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT64(UDS_SEL == U32CS_SEL + 8);
#endif

		cpu_sep_enable();

		/*
		 * resume() sets this value to the base of the thread's stack
		 * via a context handler.
		 */
		wrmsr(MSR_INTC_SEP_ESP, 0);
		wrmsr(MSR_INTC_SEP_EIP, (uint64_t)(uintptr_t)sys_sysenter);
	}

	kpreempt_enable();
}

/*
 * Multiprocessor initialization.
 *
 * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
 * startup and idle threads for the specified CPU.
 *
 * Returns the newly allocated, partially started struct cpu; the CPU is
 * added to the available (not yet active) list before return.  Must be
 * called from a context that may block (KM_SLEEP allocations throughout).
 */
struct cpu *
mp_startup_init(int cpun)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t sp;
	proc_t *procp;
	extern void idle();

#ifdef TRAPTRACE
	trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
#endif

	ASSERT(cpun < NCPU && cpu[cpun] == NULL);

	cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
	if (x86_feature & X86_MWAIT)
		cp->cpu_m.mcpu_mwait = mach_alloc_mwait(CPU);

	procp = curthread->t_procp;

	mutex_enter(&cpu_lock);
	/*
	 * Initialize the dispatcher first.
	 */
	disp_cpu_init(cp);
	mutex_exit(&cpu_lock);

	cpu_vm_data_init(cp);

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 * Interrupt and process switch stacks get allocated later
	 * when the CPU starts running.
	 */
	tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Setup thread to start in mp_startup.
	 */
	sp = tp->t_stk;
	tp->t_pc = (uintptr_t)mp_startup;
	tp->t_sp = (uintptr_t)(sp - MINFRAME);
#if defined(__amd64)
	tp->t_sp -= STACK_ENTRY_ALIGN;		/* fake a call */
#endif

	cp->cpu_id = cpun;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);

	/*
	 * cpu_base_spl must be set explicitly here to prevent any blocking
	 * operations in mp_startup from causing the spl of the cpu to drop
	 * to 0 (allowing device interrupts before we're ready) in resume().
	 * cpu_base_spl MUST remain at LOCK_LEVEL until the cpu is CPU_READY.
	 * As an extra bit of security on DEBUG kernels, this is enforced with
	 * an assertion in mp_startup() -- before cpu_base_spl is set to its
	 * proper value.
	 */
	cp->cpu_base_spl = ipltospl(LOCK_LEVEL);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Bootstrap the CPU's PG (processor group) data
	 */
	pg_cpu_bootstrap(cp);

	/*
	 * Perform CPC initialization on the new CPU.
	 */
	kcpc_hw_init(cp);

	/*
	 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
	 * for each CPU.
	 */
	setup_vaddr_for_ppcopy(cp);

	/*
	 * Allocate page for new GDT and initialize from current GDT.
	 */
#if !defined(__lint)
	ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
#endif
	cp->cpu_m.mcpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
	bcopy(CPU->cpu_m.mcpu_gdt, cp->cpu_m.mcpu_gdt,
	    (sizeof (*cp->cpu_m.mcpu_gdt) * NGDT));

#if defined(__i386)
	/*
	 * setup kernel %gs.
	 */
	set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
	    SEL_KPL, 0, 1);
#endif

	/*
	 * If we have more than one node, each cpu gets a copy of IDT
	 * local to its node. If this is a Pentium box, we use cpu 0's
	 * IDT. cpu 0's IDT has been made read-only to workaround the
	 * cmpxchgl register bug
	 */
	if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) {
		struct machcpu *mcpu = &cp->cpu_m;

		mcpu->mcpu_idt = kmem_alloc(sizeof (idt0), KM_SLEEP);
		bcopy(idt0, mcpu->mcpu_idt, sizeof (idt0));
	} else {
		/* Shared IDT: mp_startup_fini() knows not to free this. */
		cp->cpu_m.mcpu_idt = CPU->cpu_m.mcpu_idt;
	}

	/*
	 * Get interrupt priority data from cpu 0.
	 */
	cp->cpu_pri_data = CPU->cpu_pri_data;

	/*
	 * alloc space for cpuid info
	 */
	cpuid_alloc_space(cp);

	/*
	 * alloc space for ucode_info (microcode revision data)
	 */
	ucode_alloc_space(cp);

	hat_cpu_online(cp);

#ifdef TRAPTRACE
	/*
	 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers
	 */
	ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
	ttc->ttc_next = ttc->ttc_first;
	ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
#endif
	/*
	 * Record that we have another CPU.
	 */
	mutex_enter(&cpu_lock);
	/*
	 * Initialize the interrupt threads for this CPU
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);
	/*
	 * Add CPU to list of available CPUs. It'll be on the active list
	 * after mp_startup().
	 */
	cpu_add_unit(cp);
	mutex_exit(&cpu_lock);

	return (cp);
}

/*
 * Undo what was done in mp_startup_init
 *
 * 'error' is the reason the CPU failed to start.  For ETIMEDOUT the CPU
 * may still be running (or may wake later) with references to our
 * allocations, so we deliberately leak its data structures; for any other
 * error we tear everything down in reverse order of mp_startup_init().
 */
static void
mp_startup_fini(struct cpu *cp, int error)
{
	mutex_enter(&cpu_lock);

	/*
	 * Remove the CPU from the list of available CPUs.
	 */
	cpu_del_unit(cp->cpu_id);

	if (error == ETIMEDOUT) {
		/*
		 * The cpu was started, but never *seemed* to run any
		 * code in the kernel; it's probably off spinning in its
		 * own private world, though with potential references to
		 * our kmem-allocated IDTs and GDTs (for example).
		 *
		 * Worse still, it may actually wake up some time later,
		 * so rather than guess what it might or might not do, we
		 * leave the fundamental data structures intact.
		 */
		cp->cpu_flags = 0;
		mutex_exit(&cpu_lock);
		return;
	}

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-cpu threads: its idle thread, its pause threads,
	 * and its interrupt threads. Clean these up.
	 */
	cpu_destroy_bound_threads(cp);
	cp->cpu_idle_thread = NULL;

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp,
	    cp->cpu_intr_stack - (INTR_STACK_SIZE - SA(MINFRAME)));

	mutex_exit(&cpu_lock);

#ifdef TRAPTRACE
	/*
	 * Discard the trap trace buffer
	 */
	{
		trap_trace_ctl_t *ttc = &trap_trace_ctl[cp->cpu_id];

		kmem_free((void *)ttc->ttc_first, trap_trace_bufsize);
		ttc->ttc_first = NULL;
	}
#endif

	hat_cpu_offline(cp);

	cpuid_free_space(cp);

	ucode_free_space(cp);

	/* Only free a per-node IDT copy, never cpu 0's shared IDT. */
	if (cp->cpu_m.mcpu_idt != CPU->cpu_m.mcpu_idt)
		kmem_free(cp->cpu_m.mcpu_idt, sizeof (idt0));
	cp->cpu_m.mcpu_idt = NULL;

	kmem_free(cp->cpu_m.mcpu_gdt, PAGESIZE);
	cp->cpu_m.mcpu_gdt = NULL;

	teardown_vaddr_for_ppcopy(cp);

	kcpc_hw_fini(cp);

	cp->cpu_dispthread = NULL;
	cp->cpu_thread = NULL;	/* discarded by cpu_destroy_bound_threads() */

	cpu_vm_data_destroy(cp);

	mutex_enter(&cpu_lock);
	disp_cpu_fini(cp);
	mutex_exit(&cpu_lock);

	kmem_free(cp, sizeof (*cp));
}

/*
 * Apply workarounds for known errata, and warn about those that are absent.
 *
 * System vendors occasionally create configurations which contain different
 * revisions of the CPUs that are almost but not exactly the same.
At the 5057c478bd9Sstevel@tonic-gate * time of writing, this meant that their clock rates were the same, their 5067c478bd9Sstevel@tonic-gate * feature sets were the same, but the required workaround were -not- 5077c478bd9Sstevel@tonic-gate * necessarily the same. So, this routine is invoked on -every- CPU soon 5087c478bd9Sstevel@tonic-gate * after starting to make sure that the resulting system contains the most 5097c478bd9Sstevel@tonic-gate * pessimal set of workarounds needed to cope with *any* of the CPUs in the 5107c478bd9Sstevel@tonic-gate * system. 5117c478bd9Sstevel@tonic-gate * 512ef50d8c0Sesaxe * workaround_errata is invoked early in mlsetup() for CPU 0, and in 513ef50d8c0Sesaxe * mp_startup() for all slave CPUs. Slaves process workaround_errata prior 514ef50d8c0Sesaxe * to acknowledging their readiness to the master, so this routine will 515ef50d8c0Sesaxe * never be executed by multiple CPUs in parallel, thus making updates to 516ef50d8c0Sesaxe * global data safe. 517ef50d8c0Sesaxe * 5182201b277Skucharsk * These workarounds are based on Rev 3.57 of the Revision Guide for 5192201b277Skucharsk * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, August 2005. 
5207c478bd9Sstevel@tonic-gate */ 5217c478bd9Sstevel@tonic-gate 522ae115bc7Smrj #if defined(OPTERON_ERRATUM_88) 523ae115bc7Smrj int opteron_erratum_88; /* if non-zero -> at least one cpu has it */ 524ae115bc7Smrj #endif 525ae115bc7Smrj 5267c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91) 5277c478bd9Sstevel@tonic-gate int opteron_erratum_91; /* if non-zero -> at least one cpu has it */ 5287c478bd9Sstevel@tonic-gate #endif 5297c478bd9Sstevel@tonic-gate 5307c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93) 5317c478bd9Sstevel@tonic-gate int opteron_erratum_93; /* if non-zero -> at least one cpu has it */ 5327c478bd9Sstevel@tonic-gate #endif 5337c478bd9Sstevel@tonic-gate 534ae115bc7Smrj #if defined(OPTERON_ERRATUM_95) 535ae115bc7Smrj int opteron_erratum_95; /* if non-zero -> at least one cpu has it */ 536ae115bc7Smrj #endif 537ae115bc7Smrj 5387c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_100) 5397c478bd9Sstevel@tonic-gate int opteron_erratum_100; /* if non-zero -> at least one cpu has it */ 5407c478bd9Sstevel@tonic-gate #endif 5417c478bd9Sstevel@tonic-gate 542ae115bc7Smrj #if defined(OPTERON_ERRATUM_108) 543ae115bc7Smrj int opteron_erratum_108; /* if non-zero -> at least one cpu has it */ 544ae115bc7Smrj #endif 545ae115bc7Smrj 5467c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109) 5477c478bd9Sstevel@tonic-gate int opteron_erratum_109; /* if non-zero -> at least one cpu has it */ 5487c478bd9Sstevel@tonic-gate #endif 5497c478bd9Sstevel@tonic-gate 5507c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121) 5517c478bd9Sstevel@tonic-gate int opteron_erratum_121; /* if non-zero -> at least one cpu has it */ 5527c478bd9Sstevel@tonic-gate #endif 5537c478bd9Sstevel@tonic-gate 5547c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_122) 5557c478bd9Sstevel@tonic-gate int opteron_erratum_122; /* if non-zero -> at least one cpu has it */ 5567c478bd9Sstevel@tonic-gate #endif 5577c478bd9Sstevel@tonic-gate 5587c478bd9Sstevel@tonic-gate #if 
defined(OPTERON_ERRATUM_123) 5597c478bd9Sstevel@tonic-gate int opteron_erratum_123; /* if non-zero -> at least one cpu has it */ 5607c478bd9Sstevel@tonic-gate #endif 5617c478bd9Sstevel@tonic-gate 5622201b277Skucharsk #if defined(OPTERON_ERRATUM_131) 5632201b277Skucharsk int opteron_erratum_131; /* if non-zero -> at least one cpu has it */ 5642201b277Skucharsk #endif 5657c478bd9Sstevel@tonic-gate 566ef50d8c0Sesaxe #if defined(OPTERON_WORKAROUND_6336786) 567ef50d8c0Sesaxe int opteron_workaround_6336786; /* non-zero -> WA relevant and applied */ 568ef50d8c0Sesaxe int opteron_workaround_6336786_UP = 0; /* Not needed for UP */ 569ef50d8c0Sesaxe #endif 570ef50d8c0Sesaxe 571ee88d2b9Skchow #if defined(OPTERON_WORKAROUND_6323525) 572ee88d2b9Skchow int opteron_workaround_6323525; /* if non-zero -> at least one cpu has it */ 573ee88d2b9Skchow #endif 574ee88d2b9Skchow 575ae115bc7Smrj static void 576ae115bc7Smrj workaround_warning(cpu_t *cp, uint_t erratum) 577ae115bc7Smrj { 578ae115bc7Smrj cmn_err(CE_WARN, "cpu%d: no workaround for erratum %u", 579ae115bc7Smrj cp->cpu_id, erratum); 580ae115bc7Smrj } 581ae115bc7Smrj 582ae115bc7Smrj static void 583ae115bc7Smrj workaround_applied(uint_t erratum) 584ae115bc7Smrj { 585ae115bc7Smrj if (erratum > 1000000) 586ae115bc7Smrj cmn_err(CE_CONT, "?workaround applied for cpu issue #%d\n", 587ae115bc7Smrj erratum); 588ae115bc7Smrj else 589ae115bc7Smrj cmn_err(CE_CONT, "?workaround applied for cpu erratum #%d\n", 590ae115bc7Smrj erratum); 591ae115bc7Smrj } 592ae115bc7Smrj 593ae115bc7Smrj static void 594ae115bc7Smrj msr_warning(cpu_t *cp, const char *rw, uint_t msr, int error) 595ae115bc7Smrj { 596ae115bc7Smrj cmn_err(CE_WARN, "cpu%d: couldn't %smsr 0x%x, error %d", 597ae115bc7Smrj cp->cpu_id, rw, msr, error); 598ae115bc7Smrj } 5997c478bd9Sstevel@tonic-gate 6007c478bd9Sstevel@tonic-gate uint_t 6017c478bd9Sstevel@tonic-gate workaround_errata(struct cpu *cpu) 6027c478bd9Sstevel@tonic-gate { 6037c478bd9Sstevel@tonic-gate uint_t missing = 0; 
	ASSERT(cpu == CPU);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 88) > 0) {
		/*
		 * SWAPGS May Fail To Read Correct GS Base
		 */
#if defined(OPTERON_ERRATUM_88)
		/*
		 * The workaround is an mfence in the relevant assembler code
		 */
		opteron_erratum_88++;
#else
		workaround_warning(cpu, 88);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 91) > 0) {
		/*
		 * Software Prefetches May Report A Page Fault
		 */
#if defined(OPTERON_ERRATUM_91)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_91++;
#else
		workaround_warning(cpu, 91);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 93) > 0) {
		/*
		 * RSM Auto-Halt Restart Returns to Incorrect RIP
		 */
#if defined(OPTERON_ERRATUM_93)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_93++;
#else
		workaround_warning(cpu, 93);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 95) > 0) {
		/*
		 * RET Instruction May Return to Incorrect EIP
		 */
#if defined(OPTERON_ERRATUM_95)
#if defined(_LP64)
		/*
		 * Workaround this by ensuring that 32-bit user code and
		 * 64-bit kernel code never occupy the same address
		 * range mod 4G.
		 */
		if (_userlimit32 > 0xc0000000ul)
			*(uintptr_t *)&_userlimit32 = 0xc0000000ul;

		/*LINTED*/
		ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
		opteron_erratum_95++;
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 95);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 100) > 0) {
		/*
		 * Compatibility Mode Branches Transfer to Illegal Address
		 */
#if defined(OPTERON_ERRATUM_100)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_100++;
#else
		workaround_warning(cpu, 100);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 108) > 0) {
		/*
		 * CPUID Instruction May Return Incorrect Model Number In
		 * Some Processors
		 */
#if defined(OPTERON_ERRATUM_108)
		/*
		 * (Our cpuid-handling code corrects the model number on
		 * those processors)
		 */
#else
		workaround_warning(cpu, 108);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 109) > 0) do {
		/*
		 * Certain Reverse REP MOVS May Produce Unpredictable Behaviour
		 */
#if defined(OPTERON_ERRATUM_109)
		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 109);
			missing++;
		}
		/*
		 * A zero patch level means no BIOS microcode patch is
		 * installed; flag it so workaround_errata_end() can warn.
		 */
		if (value == 0)
			opteron_erratum_109++;
#else
		workaround_warning(cpu, 109);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 121) > 0) {
		/*
		 * Sequential Execution Across Non_Canonical Boundary Caused
		 * Processor Hang
		 */
#if defined(OPTERON_ERRATUM_121)
#if defined(_LP64)
		/*
		 * Erratum 121 is only present in long (64 bit) mode.
		 * Workaround is to include the page immediately before the
		 * va hole to eliminate the possibility of system hangs due to
		 * sequential execution across the va hole boundary.
		 */
		if (opteron_erratum_121)
			opteron_erratum_121++;
		else {
			if (hole_start) {
				hole_start -= PAGESIZE;
			} else {
				/*
				 * hole_start not yet initialized by
				 * mmu_init. Initialize hole_start
				 * with value to be subtracted.
				 */
				hole_start = PAGESIZE;
			}
			opteron_erratum_121++;
		}
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 121);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 122) > 0) do {
		/*
		 * TLB Flush Filter May Cause Coherency Problem in
		 * Multiprocessor Systems
		 */
#if defined(OPTERON_ERRATUM_122)
		uint64_t value;
		const uint_t msr = MSR_AMD_HWCR;
		int error;

		/*
		 * Erratum 122 is only present in MP configurations (multi-core
		 * or multi-processor).
		 */
		if (!opteron_erratum_122 && lgrp_plat_node_cnt == 1 &&
		    cpuid_get_ncpu_per_chip(cpu) == 1)
			break;

		/* disable TLB Flush Filter */

		if ((error = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 122);
			missing++;
		} else {
			value |= (uint64_t)AMD_HWCR_FFDIS;
			if ((error = checked_wrmsr(msr, value)) != 0) {
				msr_warning(cpu, "wr", msr, error);
				workaround_warning(cpu, 122);
				missing++;
			}
		}
		opteron_erratum_122++;
#else
		workaround_warning(cpu, 122);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 123) > 0) do {
		/*
		 * Bypassed Reads May Cause Data Corruption or System Hang in
		 * Dual Core Processors
		 */
#if defined(OPTERON_ERRATUM_123)
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		/*
		 * Erratum 123 applies only to multi-core cpus.
		 */
		if (cpuid_get_ncpu_per_chip(cpu) < 2)
			break;

		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 123);
			missing++;
		}
		if (value == 0)
			opteron_erratum_123++;
#else
		workaround_warning(cpu, 123);
		missing++;

#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 131) > 0) do {
		/*
		 * Multiprocessor Systems with Four or More Cores May Deadlock
		 * Waiting for a Probe Response
		 */
#if defined(OPTERON_ERRATUM_131)
		uint64_t nbcfg;
		const uint_t msr = MSR_AMD_NB_CFG;
		const uint64_t wabits =
		    AMD_NB_CFG_SRQ_HEARTBEAT | AMD_NB_CFG_SRQ_SPR;
		int error;

		/*
		 * Erratum 131 applies to any system with four or more cores.
		 */
		if (opteron_erratum_131)
			break;

		if (lgrp_plat_node_cnt * cpuid_get_ncpu_per_chip(cpu) < 4)
			break;

		/*
		 * Print a warning if neither of the workarounds for
		 * erratum 131 is present.
		 */
		if ((error = checked_rdmsr(msr, &nbcfg)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 131);
			missing++;
		} else if ((nbcfg & wabits) == 0) {
			opteron_erratum_131++;
		} else {
			/* cannot have both workarounds set */
			ASSERT((nbcfg & wabits) != wabits);
		}
#else
		workaround_warning(cpu, 131);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*
	 * This isn't really an erratum, but for convenience the
	 * detection/workaround code lives here and in cpuid_opteron_erratum.
	 */
	if (cpuid_opteron_erratum(cpu, 6336786) > 0) {
#if defined(OPTERON_WORKAROUND_6336786)
		/*
		 * Disable C1-Clock ramping on multi-core/multi-processor
		 * K8 platforms to guard against TSC drift.
		 */
		if (opteron_workaround_6336786) {
			opteron_workaround_6336786++;
		} else if ((lgrp_plat_node_cnt *
		    cpuid_get_ncpu_per_chip(cpu) > 1) ||
		    opteron_workaround_6336786_UP) {
			int node;
			uint8_t data;

			for (node = 0; node < lgrp_plat_node_cnt; node++) {
				/*
				 * Clear PMM7[1:0] (function 3, offset 0x87)
				 * Northbridge device is the node id + 24.
				 */
				data = pci_getb_func(0, node + 24, 3, 0x87);
				data &= 0xFC;
				pci_putb_func(0, node + 24, 3, 0x87, data);
			}
			opteron_workaround_6336786++;
		}
#else
		workaround_warning(cpu, 6336786);
		missing++;
#endif
	}

	/*LINTED*/
	/*
	 * Mutex primitives don't work as expected.
	 */
	if (cpuid_opteron_erratum(cpu, 6323525) > 0) {
#if defined(OPTERON_WORKAROUND_6323525)
		/*
		 * This problem only occurs with 2 or more cores. If bit in
		 * MSR_BU_CFG set, then not applicable. The workaround
		 * is to patch the semaphore routines with the lfence
		 * instruction to provide necessary load memory barrier with
		 * possible subsequent read-modify-write ops.
		 *
		 * It is too early in boot to call the patch routine so
		 * set erratum variable to be done in startup_end().
		 */
		if (opteron_workaround_6323525) {
			opteron_workaround_6323525++;
		} else if ((x86_feature & X86_SSE2) && ((lgrp_plat_node_cnt *
		    cpuid_get_ncpu_per_chip(cpu)) > 1)) {
			if ((xrdmsr(MSR_BU_CFG) & 0x02) == 0)
				opteron_workaround_6323525++;
		}
#else
		workaround_warning(cpu, 6323525);
		missing++;
#endif
	}

	/* Number of critical workarounds that could not be applied. */
	return (missing);
}

/*
 * Late-boot pass over the per-erratum counters set above: report each
 * workaround that was applied, and issue an explicit CE_WARN for the
 * errata (109/123/131) whose only real fix is a BIOS microcode update.
 */
void
workaround_errata_end()
{
#if defined(OPTERON_ERRATUM_88)
	if (opteron_erratum_88)
		workaround_applied(88);
#endif
#if defined(OPTERON_ERRATUM_91)
	if (opteron_erratum_91)
		workaround_applied(91);
#endif
#if defined(OPTERON_ERRATUM_93)
	if (opteron_erratum_93)
		workaround_applied(93);
#endif
#if defined(OPTERON_ERRATUM_95)
	if (opteron_erratum_95)
		workaround_applied(95);
#endif
#if defined(OPTERON_ERRATUM_100)
	if (opteron_erratum_100)
		workaround_applied(100);
#endif
#if defined(OPTERON_ERRATUM_108)
	if (opteron_erratum_108)
		workaround_applied(108);
#endif
#if defined(OPTERON_ERRATUM_109)
	if (opteron_erratum_109) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 109 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_121)
	if (opteron_erratum_121)
		workaround_applied(121);
#endif
#if defined(OPTERON_ERRATUM_122)
	if (opteron_erratum_122)
		workaround_applied(122);
#endif
#if defined(OPTERON_ERRATUM_123)
	if (opteron_erratum_123) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 123 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_131)
	if (opteron_erratum_131) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 131 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_WORKAROUND_6336786)
	if (opteron_workaround_6336786)
		workaround_applied(6336786);
#endif
#if defined(OPTERON_WORKAROUND_6323525)
	if (opteron_workaround_6323525)
		workaround_applied(6323525);
#endif
}

/*
 * Set of cpus that have announced themselves from mp_startup();
 * start_cpu() polls this to learn that a slave has entered the kernel.
 */
static cpuset_t procset;

/*
 * Start a single cpu, assuming that the kernel context is available
 * to successfully start another cpu.
 *
 * (For example, real mode code is mapped into the right place
 * in memory and is ready to be run.)
 *
 * Returns 0 on success or an errno (ENOMEM, ETIMEDOUT, or whatever
 * mach_cpu_start() reported) on failure.
 */
int
start_cpu(processorid_t who)
{
	void *ctx;
	cpu_t *cp;
	int delays;
	int error = 0;

	ASSERT(who != 0);

	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ..
		 */
		kmem_reap();
		return (ENOMEM);
	}

	cp = mp_startup_init(who);
	if ((ctx = mach_cpucontext_alloc(cp)) == NULL ||
	    (error = mach_cpu_start(cp, ctx)) != 0) {

		/*
		 * Something went wrong before we even started it
		 */
		if (ctx)
			cmn_err(CE_WARN,
			    "cpu%d: failed to start error %d",
			    cp->cpu_id, error);
		else
			cmn_err(CE_WARN,
			    "cpu%d: failed to allocate context", cp->cpu_id);

		if (ctx)
			mach_cpucontext_free(cp, ctx, error);
		else
			error = EAGAIN;		/* hmm. */
		mp_startup_fini(cp, error);
		return (error);
	}

	/*
	 * Poll (10ms per iteration) until the new cpu adds itself to
	 * procset in mp_startup(); give up after roughly 20 seconds.
	 */
	for (delays = 0; !CPU_IN_SET(procset, who); delays++) {
		if (delays == 500) {
			/*
			 * After five seconds, things are probably looking
			 * a bit bleak - explain the hang.
			 */
			cmn_err(CE_NOTE, "cpu%d: started, "
			    "but not running in the kernel yet", who);
		} else if (delays > 2000) {
			/*
			 * We waited at least 20 seconds, bail ..
			 */
			error = ETIMEDOUT;
			cmn_err(CE_WARN, "cpu%d: timed out", who);
			mach_cpucontext_free(cp, ctx, error);
			mp_startup_fini(cp, error);
			return (error);
		}

		/*
		 * wait at least 10ms, then check again..
		 */
		delay(USEC_TO_TICK_ROUNDUP(10000));
	}

	mach_cpucontext_free(cp, ctx, 0);

	if (tsc_gethrtime_enable)
		tsc_sync_master(who);

	if (dtrace_cpu_init != NULL) {
		/*
		 * DTrace CPU initialization expects cpu_lock to be held.
		 */
		mutex_enter(&cpu_lock);
		(*dtrace_cpu_init)(who);
		mutex_exit(&cpu_lock);
	}

	/* Wait for the slave to mark itself fully ready. */
	while (!CPU_IN_SET(cpu_ready_set, who))
		delay(1);

	return (0);
}


/*
 * Bring up every other cpu in mp_cpus, performing the once-per-boot
 * MP initialization (crosscalls, cpu pause, cpu context setup) on the way.
 * Called on the boot cpu.
 */
/*ARGSUSED*/
void
start_other_cpus(int cprboot)
{
	uint_t who;
	uint_t skipped = 0;
	uint_t bootcpuid = 0;

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize our syscall handlers
	 */
	init_cpu_syscall(CPU);

	/*
	 * Take the boot cpu out of the mp_cpus set because we know
	 * it's already running.  Add it to the cpu_ready_set for
	 * precisely the same reason.
	 */
	CPUSET_DEL(mp_cpus, bootcpuid);
	CPUSET_ADD(cpu_ready_set, bootcpuid);

	/*
	 * if only 1 cpu or not using MP, skip the rest of this
	 */
	if (CPUSET_ISNULL(mp_cpus) || use_mp == 0) {
		if (use_mp == 0)
			cmn_err(CE_CONT, "?***** Not in MP mode\n");
		goto done;
	}

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();

	xc_init();		/* initialize processor crosscalls */

	if (mach_cpucontext_init() != 0)
		goto done;

	flushes_require_xcalls = 1;

	/*
	 * We lock our affinity to the master CPU to ensure that all slave CPUs
	 * do their TSC syncs with the same CPU.
	 */
	affinity_set(CPU_CURRENT);

	for (who = 0; who < NCPU; who++) {

		if (!CPU_IN_SET(mp_cpus, who))
			continue;
		ASSERT(who != bootcpuid);
		if (ncpus >= max_ncpus) {
			/* remember one skipped cpu so we can warn below */
			skipped = who;
			continue;
		}
		if (start_cpu(who) != 0)
			CPUSET_DEL(mp_cpus, who);
	}

	/* Free the space allocated to hold the microcode file */
	ucode_free();

	affinity_clear();

	if (skipped) {
		cmn_err(CE_NOTE,
		    "System detected %d cpus, but "
		    "only %d cpu(s) were enabled during boot.",
		    skipped + 1, ncpus);
		cmn_err(CE_NOTE,
		    "Use \"boot-ncpus\" parameter to enable more CPU(s). "
		    "See eeprom(1M).");
	}

done:
	workaround_errata_end();
	mach_cpucontext_fini();

	cmi_post_mpstartup();
}

/*
 * Dummy functions - no i86pc platforms support dynamic cpu allocation.
 */
/*ARGSUSED*/
int
mp_cpu_configure(int cpuid)
{
	return (ENOTSUP);		/* not supported */
}

/*ARGSUSED*/
int
mp_cpu_unconfigure(int cpuid)
{
	return (ENOTSUP);		/* not supported */
}

/*
 * Startup function for 'other' CPUs (besides boot cpu).
 * Called from real_mode_start.
 *
 * WARNING: until CPU_READY is set, mp_startup and routines called by
 * mp_startup should not call routines (e.g. kmem_free) that could call
 * hat_unload which requires CPU_READY to be set.
 */
void
mp_startup(void)
{
	struct cpu *cp = CPU;
	uint_t new_x86_feature;

	/*
	 * We need to get TSC on this proc synced (i.e., any delta
	 * from cpu0 accounted for) as soon as we can, because many
	 * many things use gethrtime/pc_gethrestime, including
	 * interrupts, cmn_err, etc.
	 */

	/* Let cpu0 continue into tsc_sync_master() */
	CPUSET_ATOMIC_ADD(procset, cp->cpu_id);

	if (tsc_gethrtime_enable)
		tsc_sync_slave();

	/*
	 * Once this was done from assembly, but it's safer here; if
	 * it blocks, we need to be able to swtch() to and from, and
	 * since we get here by calling t_pc, we need to do that call
	 * before swtch() overwrites it.
	 */

	(void) (*ap_mlsetup)();

	new_x86_feature = cpuid_pass1(cp);

	/*
	 * We need to Sync MTRR with cpu0's MTRR. We have to do
	 * this with interrupts disabled.
	 */
	if (x86_feature & X86_MTRR)
		mtrr_sync();

	/*
	 * Set up TSC_AUX to contain the cpuid for this processor
	 * for the rdtscp instruction.
	 */
	if (x86_feature & X86_TSCP)
		(void) wrmsr(MSR_AMD_TSCAUX, cp->cpu_id);

	/*
	 * Initialize this CPU's syscall handlers
	 */
	init_cpu_syscall(cp);

	/*
	 * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the
	 * highest level at which a routine is permitted to block on
	 * an adaptive mutex (allows for cpu poke interrupt in case
	 * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks
	 * device interrupts that may end up in the hat layer issuing cross
	 * calls before CPU_READY is set.
	 */
	splx(ipltospl(LOCK_LEVEL));
	sti();

	/*
	 * Do a sanity check to make sure this new CPU is a sane thing
	 * to add to the collection of processors running this system.
	 *
	 * XXX	Clearly this needs to get more sophisticated, if x86
	 * systems start to get built out of heterogenous CPUs; as is
	 * likely to happen once the number of processors in a configuration
	 * gets large enough.
	 */
	if ((x86_feature & new_x86_feature) != x86_feature) {
		cmn_err(CE_CONT, "?cpu%d: %b\n",
		    cp->cpu_id, new_x86_feature, FMT_X86_FEATURE);
		cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
	}

	/*
	 * We do not support cpus with mixed monitor/mwait support if the
	 * boot cpu supports monitor/mwait.
	 */
	if ((x86_feature & ~new_x86_feature) & X86_MWAIT)
		panic("unsupported mixed cpu monitor/mwait support detected");

	/*
	 * We could be more sophisticated here, and just mark the CPU
	 * as "faulted" but at this point we'll opt for the easier
	 * answer of dying horribly.  Provided the boot cpu is ok,
	 * the system can be recovered by booting with use_mp set to zero.
	 */
	if (workaround_errata(cp) != 0)
		panic("critical workaround(s) missing for cpu%d", cp->cpu_id);

	cpuid_pass2(cp);
	cpuid_pass3(cp);
	(void) cpuid_pass4(cp);

	init_cpu_info(cp);

	mutex_enter(&cpu_lock);
	/*
	 * Processor group initialization for this CPU is dependent on the
	 * cpuid probing, which must be done in the context of the current
	 * CPU.
	 */
	pghw_physid_create(cp);
	pg_cpu_init(cp);
	pg_cmt_cpu_startup(cp);

	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_ENABLE | CPU_EXISTS;
	cpu_add_active(cp);

	if (dtrace_cpu_init != NULL) {
		(*dtrace_cpu_init)(cp->cpu_id);
	}

	/*
	 * Fill out cpu_ucode_info.  Update microcode if necessary.
	 */
	ucode_check(cp);

	mutex_exit(&cpu_lock);

	/*
	 * Enable preemption here so that contention for any locks acquired
	 * later in mp_startup may be preempted if the thread owning those
	 * locks is continuously executing on other CPUs (for example, this
	 * CPU must be preemptible to allow other CPUs to pause it during their
	 * startup phases).  It's safe to enable preemption here because the
	 * CPU state is pretty-much fully constructed.
	 */
	curthread->t_preempt = 0;

	add_cpunode2devtree(cp->cpu_id, cp->cpu_m.mcpu_cpi);

	/* The base spl should still be at LOCK LEVEL here */
	ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
	set_base_spl();		/* Restore the spl to its proper value */

	(void) spl0();			/* enable interrupts */

	/*
	 * Set up the CPU module for this CPU.  This can't be done before
	 * this CPU is made CPU_READY, because we may (in heterogeneous systems)
	 * need to go load another CPU module.  The act of attempting to load
	 * a module may trigger a cross-call, which will ASSERT unless this
	 * cpu is CPU_READY.
	 */
	cmi_init();

	if (x86_feature & X86_MCA)
		cmi_mca_init();

	if (boothowto & RB_DEBUG)
		kdi_cpu_init();

	/*
	 * Setting the bit in cpu_ready_set must be the last operation in
	 * processor initialization; the boot CPU will continue to boot once
	 * it sees this bit set for all active CPUs.
	 */
	CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);

	/*
	 * Because mp_startup() gets fired off after init() starts, we
	 * can't use the '?' trick to do 'boot -v' printing - so we
	 * always direct the 'cpu .. online' messages to the log.
	 */
	cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
	    cp->cpu_id);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	panic("mp_startup: cannot return");
	/*NOTREACHED*/
}


/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (0);
}

/*
 * Stop CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_stop(struct cpu *cp)
{
	extern int cbe_psm_timer_mode;
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * If TIMER_PERIODIC mode is used, CPU0 is the one running it;
	 * can't stop it.  (This is true only for machines with no TSC.)
	 */

	if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
		return (1);

	return (0);
}

/*
 * Take the specified CPU out of participation in interrupts.
 */
int
cpu_disable_intr(struct cpu *cp)
{
	if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
		return (EBUSY);

	cp->cpu_flags &= ~CPU_ENABLE;
	return (0);
}

/*
 * Allow the specified CPU to participate in interrupts.
 */
void
cpu_enable_intr(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	cp->cpu_flags |= CPU_ENABLE;
	psm_enable_intr(cp->cpu_id);
}



/* Hand the faulted-state entry transition to the CPU module (cmi). */
void
mp_cpu_faulted_enter(struct cpu *cp)
{
	cmi_faulted_enter(cp);
}

/* Hand the faulted-state exit transition to the CPU module (cmi). */
void
mp_cpu_faulted_exit(struct cpu *cp)
{
	cmi_faulted_exit(cp);
}

/*
 * The following two routines are used as context operators on threads belonging
 * to processes with a private LDT (see sysi86). Due to the rarity of such
 * processes, these routines are currently written for best code readability and
We could avoid checking x86_feature at every 15027c478bd9Sstevel@tonic-gate * context switch by installing different context ops, depending on the 15037c478bd9Sstevel@tonic-gate * x86_feature flags, at LDT creation time -- one for each combination of fast 15047c478bd9Sstevel@tonic-gate * syscall feature flags. 15057c478bd9Sstevel@tonic-gate */ 15067c478bd9Sstevel@tonic-gate 15077c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 15087c478bd9Sstevel@tonic-gate void 15097c478bd9Sstevel@tonic-gate cpu_fast_syscall_disable(void *arg) 15107c478bd9Sstevel@tonic-gate { 1511ae115bc7Smrj if ((x86_feature & (X86_MSR | X86_SEP)) == (X86_MSR | X86_SEP)) 15127c478bd9Sstevel@tonic-gate cpu_sep_disable(); 1513ae115bc7Smrj if ((x86_feature & (X86_MSR | X86_ASYSC)) == (X86_MSR | X86_ASYSC)) 15147c478bd9Sstevel@tonic-gate cpu_asysc_disable(); 15157c478bd9Sstevel@tonic-gate } 15167c478bd9Sstevel@tonic-gate 15177c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 15187c478bd9Sstevel@tonic-gate void 15197c478bd9Sstevel@tonic-gate cpu_fast_syscall_enable(void *arg) 15207c478bd9Sstevel@tonic-gate { 1521ae115bc7Smrj if ((x86_feature & (X86_MSR | X86_SEP)) == (X86_MSR | X86_SEP)) 15227c478bd9Sstevel@tonic-gate cpu_sep_enable(); 1523ae115bc7Smrj if ((x86_feature & (X86_MSR | X86_ASYSC)) == (X86_MSR | X86_ASYSC)) 15247c478bd9Sstevel@tonic-gate cpu_asysc_enable(); 15257c478bd9Sstevel@tonic-gate } 15267c478bd9Sstevel@tonic-gate 15277c478bd9Sstevel@tonic-gate static void 15287c478bd9Sstevel@tonic-gate cpu_sep_enable(void) 15297c478bd9Sstevel@tonic-gate { 15307c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_SEP); 15317c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 15327c478bd9Sstevel@tonic-gate 15330ac7d7d8Skucharsk wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL); 15347c478bd9Sstevel@tonic-gate } 15357c478bd9Sstevel@tonic-gate 15367c478bd9Sstevel@tonic-gate static void 15377c478bd9Sstevel@tonic-gate cpu_sep_disable(void) 15387c478bd9Sstevel@tonic-gate { 
15397c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_SEP); 15407c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 15417c478bd9Sstevel@tonic-gate 15427c478bd9Sstevel@tonic-gate /* 15437c478bd9Sstevel@tonic-gate * Setting the SYSENTER_CS_MSR register to 0 causes software executing 15447c478bd9Sstevel@tonic-gate * the sysenter or sysexit instruction to trigger a #gp fault. 15457c478bd9Sstevel@tonic-gate */ 1546ae115bc7Smrj wrmsr(MSR_INTC_SEP_CS, 0); 15477c478bd9Sstevel@tonic-gate } 15487c478bd9Sstevel@tonic-gate 15497c478bd9Sstevel@tonic-gate static void 15507c478bd9Sstevel@tonic-gate cpu_asysc_enable(void) 15517c478bd9Sstevel@tonic-gate { 15527c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_ASYSC); 15537c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 15547c478bd9Sstevel@tonic-gate 15550ac7d7d8Skucharsk wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) | 15560ac7d7d8Skucharsk (uint64_t)(uintptr_t)AMD_EFER_SCE); 15577c478bd9Sstevel@tonic-gate } 15587c478bd9Sstevel@tonic-gate 15597c478bd9Sstevel@tonic-gate static void 15607c478bd9Sstevel@tonic-gate cpu_asysc_disable(void) 15617c478bd9Sstevel@tonic-gate { 15627c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_ASYSC); 15637c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 15647c478bd9Sstevel@tonic-gate 15657c478bd9Sstevel@tonic-gate /* 15667c478bd9Sstevel@tonic-gate * Turn off the SCE (syscall enable) bit in the EFER register. Software 15677c478bd9Sstevel@tonic-gate * executing syscall or sysret with this bit off will incur a #ud trap. 15687c478bd9Sstevel@tonic-gate */ 15690ac7d7d8Skucharsk wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) & 15700ac7d7d8Skucharsk ~((uint64_t)(uintptr_t)AMD_EFER_SCE)); 15717c478bd9Sstevel@tonic-gate } 1572