/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/mmu.h>
#include <sys/class.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/asm_linkage.h>
#include <sys/x_call.h>
#include <sys/systm.h>
#include <sys/var.h>
#include <sys/vtrace.h>
#include <vm/hat.h>
#include <sys/mmu.h>
#include <vm/as.h>
#include <vm/seg_kmem.h>
#include <sys/segments.h>
#include <sys/kmem.h>
#include <sys/stack.h>
#include <sys/smp_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/cpc_impl.h>
#include <sys/chip.h>
#include <sys/dtrace.h>
#include <sys/archsystm.h>
#include <sys/fp.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <vm/hat_i86.h>
#include <sys/memnode.h>

struct cpu	cpus[1];			/* CPU data */
struct cpu	*cpu[NCPU] = {&cpus[0]};	/* pointers to all CPUs */
cpu_core_t	cpu_core[NCPU];			/* cpu_core structures */

/*
 * Useful for disabling MP bring-up for an MP capable kernel
 * (a kernel that was built with MP defined)
 */
int use_mp = 1;

int mp_cpus = 0x1;	/* to be set by platform specific module	*/

/*
 * This variable is used by the hat layer to decide whether or not
 * critical sections are needed to prevent race conditions.  For sun4m,
 * this variable is set once enough MP initialization has been done in
 * order to allow cross calls.
 */
int flushes_require_xcalls = 0;
/*
 * NOTE(review): by its use as a bitmask elsewhere this appears to track
 * which CPUs are ready for cross calls (bit 0 = boot CPU) -- confirm
 * against the x_call layer.
 */
ulong_t	cpu_ready_set = 1;

extern	void	real_mode_start(void);
extern	void	real_mode_end(void);
static 	void	mp_startup(void);

static void cpu_sep_enable(void);
static void cpu_sep_disable(void);
static void cpu_asysc_enable(void);
static void cpu_asysc_disable(void);

extern int tsc_gethrtime_enable;

/*
 * Init CPU info - get CPU type info for processor_info system call.
 *
 * Fills in cp->cpu_type_info (clock rate, processor and FPU type
 * strings) and allocates cp->cpu_idstr / cp->cpu_brandstr from the
 * cpuid layer.  Uses KM_SLEEP allocations, so the caller must be in a
 * context that may block.
 */
void
init_cpu_info(struct cpu *cp)
{
	processor_info_t *pi = &cp->cpu_type_info;
	char buf[CPU_IDSTRLEN];

	/*
	 * Get clock-frequency property for the CPU.
	 */
	pi->pi_clock = cpu_freq;

	(void) strcpy(pi->pi_processor_type, "i386");
	if (fpu_exists)
		(void) strcpy(pi->pi_fputypes, "i387 compatible");

	/* Identification string from the cpuid layer, e.g. vendor/family. */
	(void) cpuid_getidstr(cp, buf, sizeof (buf));

	cp->cpu_idstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
	(void) strcpy(cp->cpu_idstr, buf);

	/* The leading '?' makes this message verbose-boot/msgbuf only. */
	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);

	(void) cpuid_getbrandstr(cp, buf, sizeof (buf));
	cp->cpu_brandstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
	(void) strcpy(cp->cpu_brandstr, buf);

	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
}

/*
 * Configure syscall support on this CPU.
 */
/*ARGSUSED*/
static void
init_cpu_syscall(struct cpu *cp)
{
	uint64_t value;

	/*
	 * The MSRs written below are per-CPU state; disable kernel
	 * preemption so we stay on this CPU while programming them.
	 */
	kpreempt_disable();

#if defined(__amd64)
	if (x86_feature & X86_ASYSC) {

#if !defined(__lint)
		/*
		 * The syscall instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here.
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);
		ASSERT(UDS_SEL == U32CS_SEL + 8);
		ASSERT(UCS_SEL == U32CS_SEL + 16);
#endif
		/*
		 * Turn syscall/sysret extensions on.
		 */
		cpu_asysc_enable();

		/*
		 * Program the magic registers ..
		 * (Note: in this codebase wrmsr() takes a pointer to the
		 * 64-bit value to be written.)
		 *
		 * STAR holds the SYSCALL/SYSRET CS/SS selector bases in
		 * its upper 32 bits; LSTAR/CSTAR hold the 64-bit and
		 * compatibility-mode syscall entry points respectively.
		 */
		value = ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32;
		wrmsr(MSR_AMD_STAR, &value);
		value = (uintptr_t)sys_syscall;
		wrmsr(MSR_AMD_LSTAR, &value);
		value = (uintptr_t)sys_syscall32;
		wrmsr(MSR_AMD_CSTAR, &value);

		/*
		 * This list of flags is masked off the incoming
		 * %rfl when we enter the kernel.
		 */
		value = PS_IE | PS_T;
		wrmsr(MSR_AMD_SFMASK, &value);
	}
#endif

	/*
	 * On 32-bit kernels, we use sysenter/sysexit because it's too
	 * hard to use syscall/sysret, and it is more portable anyway.
	 *
	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
	 * variant isn't available to 32-bit applications, but sysenter is.
	 */
	if (x86_feature & X86_SEP) {

#if !defined(__lint)
		/*
		 * The sysenter instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here. See "sysenter" in Intel document 245471-012, "IA-32
		 * Intel Architecture Software Developer's Manual Volume 2:
		 * Instruction Set Reference"
		 */
		ASSERT(KDS_SEL == KCS_SEL + 8);

		ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT32(UDS_SEL == UCS_SEL + 8);

		ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
		ASSERT64(UDS_SEL == U32CS_SEL + 8);
#endif

		cpu_sep_enable();

		/*
		 * resume() sets this value to the base of the threads stack
		 * via a context handler.
		 */
		value = 0;
		wrmsr(MSR_INTC_SEP_ESP, &value);

		value = (uintptr_t)sys_sysenter;
		wrmsr(MSR_INTC_SEP_EIP, &value);
	}

	kpreempt_enable();
}

/*
 * Multiprocessor initialization.
 *
 * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
 * startup and idle threads for the specified CPU.
226*7c478bd9Sstevel@tonic-gate */ 227*7c478bd9Sstevel@tonic-gate static void 228*7c478bd9Sstevel@tonic-gate mp_startup_init(int cpun) 229*7c478bd9Sstevel@tonic-gate { 230*7c478bd9Sstevel@tonic-gate #if defined(__amd64) 231*7c478bd9Sstevel@tonic-gate extern void *long_mode_64(void); 232*7c478bd9Sstevel@tonic-gate #endif /* __amd64 */ 233*7c478bd9Sstevel@tonic-gate 234*7c478bd9Sstevel@tonic-gate struct cpu *cp; 235*7c478bd9Sstevel@tonic-gate struct tss *ntss; 236*7c478bd9Sstevel@tonic-gate kthread_id_t tp; 237*7c478bd9Sstevel@tonic-gate caddr_t sp; 238*7c478bd9Sstevel@tonic-gate int size; 239*7c478bd9Sstevel@tonic-gate proc_t *procp; 240*7c478bd9Sstevel@tonic-gate extern void idle(); 241*7c478bd9Sstevel@tonic-gate extern void init_intr_threads(struct cpu *); 242*7c478bd9Sstevel@tonic-gate 243*7c478bd9Sstevel@tonic-gate struct cpu_tables *tablesp; 244*7c478bd9Sstevel@tonic-gate extern chip_t cpu0_chip; 245*7c478bd9Sstevel@tonic-gate rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va; 246*7c478bd9Sstevel@tonic-gate 247*7c478bd9Sstevel@tonic-gate #ifdef TRAPTRACE 248*7c478bd9Sstevel@tonic-gate trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun]; 249*7c478bd9Sstevel@tonic-gate #endif 250*7c478bd9Sstevel@tonic-gate 251*7c478bd9Sstevel@tonic-gate ASSERT(cpun < NCPU && cpu[cpun] == NULL); 252*7c478bd9Sstevel@tonic-gate 253*7c478bd9Sstevel@tonic-gate if ((cp = kmem_zalloc(sizeof (*cp), KM_NOSLEEP)) == NULL) { 254*7c478bd9Sstevel@tonic-gate panic("mp_startup_init: cpu%d: " 255*7c478bd9Sstevel@tonic-gate "no memory for cpu structure", cpun); 256*7c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 257*7c478bd9Sstevel@tonic-gate } 258*7c478bd9Sstevel@tonic-gate procp = curthread->t_procp; 259*7c478bd9Sstevel@tonic-gate 260*7c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 261*7c478bd9Sstevel@tonic-gate /* 262*7c478bd9Sstevel@tonic-gate * Initialize the dispatcher first. 
263*7c478bd9Sstevel@tonic-gate */ 264*7c478bd9Sstevel@tonic-gate disp_cpu_init(cp); 265*7c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 266*7c478bd9Sstevel@tonic-gate 267*7c478bd9Sstevel@tonic-gate /* 268*7c478bd9Sstevel@tonic-gate * Allocate and initialize the startup thread for this CPU. 269*7c478bd9Sstevel@tonic-gate * Interrupt and process switch stacks get allocated later 270*7c478bd9Sstevel@tonic-gate * when the CPU starts running. 271*7c478bd9Sstevel@tonic-gate */ 272*7c478bd9Sstevel@tonic-gate tp = thread_create(NULL, 0, NULL, NULL, 0, procp, 273*7c478bd9Sstevel@tonic-gate TS_STOPPED, maxclsyspri); 274*7c478bd9Sstevel@tonic-gate 275*7c478bd9Sstevel@tonic-gate /* 276*7c478bd9Sstevel@tonic-gate * Set state to TS_ONPROC since this thread will start running 277*7c478bd9Sstevel@tonic-gate * as soon as the CPU comes online. 278*7c478bd9Sstevel@tonic-gate * 279*7c478bd9Sstevel@tonic-gate * All the other fields of the thread structure are setup by 280*7c478bd9Sstevel@tonic-gate * thread_create(). 281*7c478bd9Sstevel@tonic-gate */ 282*7c478bd9Sstevel@tonic-gate THREAD_ONPROC(tp, cp); 283*7c478bd9Sstevel@tonic-gate tp->t_preempt = 1; 284*7c478bd9Sstevel@tonic-gate tp->t_bound_cpu = cp; 285*7c478bd9Sstevel@tonic-gate tp->t_affinitycnt = 1; 286*7c478bd9Sstevel@tonic-gate tp->t_cpu = cp; 287*7c478bd9Sstevel@tonic-gate tp->t_disp_queue = cp->cpu_disp; 288*7c478bd9Sstevel@tonic-gate 289*7c478bd9Sstevel@tonic-gate /* 290*7c478bd9Sstevel@tonic-gate * Setup thread to start in mp_startup. 
291*7c478bd9Sstevel@tonic-gate */ 292*7c478bd9Sstevel@tonic-gate sp = tp->t_stk; 293*7c478bd9Sstevel@tonic-gate tp->t_pc = (uintptr_t)mp_startup; 294*7c478bd9Sstevel@tonic-gate tp->t_sp = (uintptr_t)(sp - MINFRAME); 295*7c478bd9Sstevel@tonic-gate 296*7c478bd9Sstevel@tonic-gate cp->cpu_id = cpun; 297*7c478bd9Sstevel@tonic-gate cp->cpu_self = cp; 298*7c478bd9Sstevel@tonic-gate cp->cpu_mask = 1 << cpun; 299*7c478bd9Sstevel@tonic-gate cp->cpu_thread = tp; 300*7c478bd9Sstevel@tonic-gate cp->cpu_lwp = NULL; 301*7c478bd9Sstevel@tonic-gate cp->cpu_dispthread = tp; 302*7c478bd9Sstevel@tonic-gate cp->cpu_dispatch_pri = DISP_PRIO(tp); 303*7c478bd9Sstevel@tonic-gate 304*7c478bd9Sstevel@tonic-gate /* 305*7c478bd9Sstevel@tonic-gate * Bootstrap cpu_chip in case mp_startup blocks 306*7c478bd9Sstevel@tonic-gate */ 307*7c478bd9Sstevel@tonic-gate cp->cpu_chip = &cpu0_chip; 308*7c478bd9Sstevel@tonic-gate 309*7c478bd9Sstevel@tonic-gate /* 310*7c478bd9Sstevel@tonic-gate * Now, initialize per-CPU idle thread for this CPU. 311*7c478bd9Sstevel@tonic-gate */ 312*7c478bd9Sstevel@tonic-gate tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1); 313*7c478bd9Sstevel@tonic-gate 314*7c478bd9Sstevel@tonic-gate cp->cpu_idle_thread = tp; 315*7c478bd9Sstevel@tonic-gate 316*7c478bd9Sstevel@tonic-gate tp->t_preempt = 1; 317*7c478bd9Sstevel@tonic-gate tp->t_bound_cpu = cp; 318*7c478bd9Sstevel@tonic-gate tp->t_affinitycnt = 1; 319*7c478bd9Sstevel@tonic-gate tp->t_cpu = cp; 320*7c478bd9Sstevel@tonic-gate tp->t_disp_queue = cp->cpu_disp; 321*7c478bd9Sstevel@tonic-gate 322*7c478bd9Sstevel@tonic-gate /* 323*7c478bd9Sstevel@tonic-gate * Perform CPC intialization on the new CPU. 324*7c478bd9Sstevel@tonic-gate */ 325*7c478bd9Sstevel@tonic-gate kcpc_hw_init(cp); 326*7c478bd9Sstevel@tonic-gate 327*7c478bd9Sstevel@tonic-gate /* 328*7c478bd9Sstevel@tonic-gate * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2 329*7c478bd9Sstevel@tonic-gate * for each CPU. 
330*7c478bd9Sstevel@tonic-gate */ 331*7c478bd9Sstevel@tonic-gate 332*7c478bd9Sstevel@tonic-gate setup_vaddr_for_ppcopy(cp); 333*7c478bd9Sstevel@tonic-gate 334*7c478bd9Sstevel@tonic-gate /* 335*7c478bd9Sstevel@tonic-gate * Allocate space for page directory, stack, tss, gdt and idt. 336*7c478bd9Sstevel@tonic-gate * This assumes that kmem_alloc will return memory which is aligned 337*7c478bd9Sstevel@tonic-gate * to the next higher power of 2 or a page(if size > MAXABIG) 338*7c478bd9Sstevel@tonic-gate * If this assumption goes wrong at any time due to change in 339*7c478bd9Sstevel@tonic-gate * kmem alloc, things may not work as the page directory has to be 340*7c478bd9Sstevel@tonic-gate * page aligned 341*7c478bd9Sstevel@tonic-gate */ 342*7c478bd9Sstevel@tonic-gate if ((tablesp = kmem_zalloc(sizeof (*tablesp), KM_NOSLEEP)) == NULL) 343*7c478bd9Sstevel@tonic-gate panic("mp_startup_init: cpu%d cannot allocate tables", cpun); 344*7c478bd9Sstevel@tonic-gate 345*7c478bd9Sstevel@tonic-gate if ((uintptr_t)tablesp & ~MMU_STD_PAGEMASK) { 346*7c478bd9Sstevel@tonic-gate kmem_free(tablesp, sizeof (struct cpu_tables)); 347*7c478bd9Sstevel@tonic-gate size = sizeof (struct cpu_tables) + MMU_STD_PAGESIZE; 348*7c478bd9Sstevel@tonic-gate tablesp = kmem_zalloc(size, KM_NOSLEEP); 349*7c478bd9Sstevel@tonic-gate tablesp = (struct cpu_tables *) 350*7c478bd9Sstevel@tonic-gate (((uintptr_t)tablesp + MMU_STD_PAGESIZE) & 351*7c478bd9Sstevel@tonic-gate MMU_STD_PAGEMASK); 352*7c478bd9Sstevel@tonic-gate } 353*7c478bd9Sstevel@tonic-gate 354*7c478bd9Sstevel@tonic-gate ntss = cp->cpu_tss = &tablesp->ct_tss; 355*7c478bd9Sstevel@tonic-gate cp->cpu_gdt = tablesp->ct_gdt; 356*7c478bd9Sstevel@tonic-gate bcopy(CPU->cpu_gdt, cp->cpu_gdt, NGDT * (sizeof (user_desc_t))); 357*7c478bd9Sstevel@tonic-gate 358*7c478bd9Sstevel@tonic-gate #if defined(__amd64) 359*7c478bd9Sstevel@tonic-gate 360*7c478bd9Sstevel@tonic-gate /* 361*7c478bd9Sstevel@tonic-gate * #DF (double fault). 
362*7c478bd9Sstevel@tonic-gate */ 363*7c478bd9Sstevel@tonic-gate ntss->tss_ist1 = 364*7c478bd9Sstevel@tonic-gate (uint64_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)]; 365*7c478bd9Sstevel@tonic-gate 366*7c478bd9Sstevel@tonic-gate #elif defined(__i386) 367*7c478bd9Sstevel@tonic-gate 368*7c478bd9Sstevel@tonic-gate ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp = 369*7c478bd9Sstevel@tonic-gate (uint32_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)]; 370*7c478bd9Sstevel@tonic-gate 371*7c478bd9Sstevel@tonic-gate ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL; 372*7c478bd9Sstevel@tonic-gate 373*7c478bd9Sstevel@tonic-gate ntss->tss_eip = (uint32_t)mp_startup; 374*7c478bd9Sstevel@tonic-gate 375*7c478bd9Sstevel@tonic-gate ntss->tss_cs = KCS_SEL; 376*7c478bd9Sstevel@tonic-gate ntss->tss_fs = KFS_SEL; 377*7c478bd9Sstevel@tonic-gate ntss->tss_gs = KGS_SEL; 378*7c478bd9Sstevel@tonic-gate 379*7c478bd9Sstevel@tonic-gate /* 380*7c478bd9Sstevel@tonic-gate * setup kernel %gs. 381*7c478bd9Sstevel@tonic-gate */ 382*7c478bd9Sstevel@tonic-gate set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA, 383*7c478bd9Sstevel@tonic-gate SEL_KPL, 0, 1); 384*7c478bd9Sstevel@tonic-gate 385*7c478bd9Sstevel@tonic-gate #endif /* __i386 */ 386*7c478bd9Sstevel@tonic-gate 387*7c478bd9Sstevel@tonic-gate /* 388*7c478bd9Sstevel@tonic-gate * Set I/O bit map offset equal to size of TSS segment limit 389*7c478bd9Sstevel@tonic-gate * for no I/O permission map. This will cause all user I/O 390*7c478bd9Sstevel@tonic-gate * instructions to generate #gp fault. 391*7c478bd9Sstevel@tonic-gate */ 392*7c478bd9Sstevel@tonic-gate ntss->tss_bitmapbase = sizeof (*ntss); 393*7c478bd9Sstevel@tonic-gate 394*7c478bd9Sstevel@tonic-gate /* 395*7c478bd9Sstevel@tonic-gate * setup kernel tss. 
396*7c478bd9Sstevel@tonic-gate */ 397*7c478bd9Sstevel@tonic-gate set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss, 398*7c478bd9Sstevel@tonic-gate sizeof (*cp->cpu_tss) -1, SDT_SYSTSS, SEL_KPL); 399*7c478bd9Sstevel@tonic-gate 400*7c478bd9Sstevel@tonic-gate /* 401*7c478bd9Sstevel@tonic-gate * If we have more than one node, each cpu gets a copy of IDT 402*7c478bd9Sstevel@tonic-gate * local to its node. If this is a Pentium box, we use cpu 0's 403*7c478bd9Sstevel@tonic-gate * IDT. cpu 0's IDT has been made read-only to workaround the 404*7c478bd9Sstevel@tonic-gate * cmpxchgl register bug 405*7c478bd9Sstevel@tonic-gate */ 406*7c478bd9Sstevel@tonic-gate cp->cpu_idt = CPU->cpu_idt; 407*7c478bd9Sstevel@tonic-gate if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) { 408*7c478bd9Sstevel@tonic-gate cp->cpu_idt = kmem_alloc(sizeof (idt0), KM_SLEEP); 409*7c478bd9Sstevel@tonic-gate bcopy(idt0, cp->cpu_idt, sizeof (idt0)); 410*7c478bd9Sstevel@tonic-gate } 411*7c478bd9Sstevel@tonic-gate 412*7c478bd9Sstevel@tonic-gate /* 413*7c478bd9Sstevel@tonic-gate * Get interrupt priority data from cpu 0 414*7c478bd9Sstevel@tonic-gate */ 415*7c478bd9Sstevel@tonic-gate cp->cpu_pri_data = CPU->cpu_pri_data; 416*7c478bd9Sstevel@tonic-gate 417*7c478bd9Sstevel@tonic-gate hat_cpu_online(cp); 418*7c478bd9Sstevel@tonic-gate 419*7c478bd9Sstevel@tonic-gate /* Should remove all entries for the current process/thread here */ 420*7c478bd9Sstevel@tonic-gate 421*7c478bd9Sstevel@tonic-gate /* 422*7c478bd9Sstevel@tonic-gate * Fill up the real mode platter to make it easy for real mode code to 423*7c478bd9Sstevel@tonic-gate * kick it off. This area should really be one passed by boot to kernel 424*7c478bd9Sstevel@tonic-gate * and guaranteed to be below 1MB and aligned to 16 bytes. Should also 425*7c478bd9Sstevel@tonic-gate * have identical physical and virtual address in paged mode. 
426*7c478bd9Sstevel@tonic-gate */ 427*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_idt_base = cp->cpu_idt; 428*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_idt_lim = sizeof (idt0) - 1; 429*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_gdt_base = cp->cpu_gdt; 430*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_gdt_lim = sizeof (gdt0) -1; 431*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_pdbr = getcr3(); 432*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_cpu = cpun; 433*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_x86feature = x86_feature; 434*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_cr4 = cr4_value; 435*7c478bd9Sstevel@tonic-gate 436*7c478bd9Sstevel@tonic-gate #if defined(__amd64) 437*7c478bd9Sstevel@tonic-gate if (getcr3() > 0xffffffffUL) 438*7c478bd9Sstevel@tonic-gate panic("Cannot initialize CPUs; kernel's 64-bit page tables\n" 439*7c478bd9Sstevel@tonic-gate "located above 4G in physical memory (@ 0x%llx).", 440*7c478bd9Sstevel@tonic-gate (unsigned long long)getcr3()); 441*7c478bd9Sstevel@tonic-gate 442*7c478bd9Sstevel@tonic-gate /* 443*7c478bd9Sstevel@tonic-gate * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY 444*7c478bd9Sstevel@tonic-gate * by code in real_mode_start(): 445*7c478bd9Sstevel@tonic-gate * 446*7c478bd9Sstevel@tonic-gate * GDT[0]: NULL selector 447*7c478bd9Sstevel@tonic-gate * GDT[1]: 64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1 448*7c478bd9Sstevel@tonic-gate * 449*7c478bd9Sstevel@tonic-gate * Clear the IDT as interrupts will be off and a limit of 0 will cause 450*7c478bd9Sstevel@tonic-gate * the CPU to triple fault and reset on an NMI, seemingly as reasonable 451*7c478bd9Sstevel@tonic-gate * a course of action as any other, though it may cause the entire 452*7c478bd9Sstevel@tonic-gate * platform to reset in some cases... 
453*7c478bd9Sstevel@tonic-gate */ 454*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_temp_gdt[0] = 0ULL; 455*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL; 456*7c478bd9Sstevel@tonic-gate 457*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_temp_gdt_lim = (ushort_t) 458*7c478bd9Sstevel@tonic-gate (sizeof (real_mode_platter->rm_temp_gdt) - 1); 459*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_temp_gdt_base = rm_platter_pa + 460*7c478bd9Sstevel@tonic-gate (uint32_t)(&((rm_platter_t *)0)->rm_temp_gdt); 461*7c478bd9Sstevel@tonic-gate 462*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_temp_idt_lim = 0; 463*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_temp_idt_base = 0; 464*7c478bd9Sstevel@tonic-gate 465*7c478bd9Sstevel@tonic-gate /* 466*7c478bd9Sstevel@tonic-gate * Since the CPU needs to jump to protected mode using an identity 467*7c478bd9Sstevel@tonic-gate * mapped address, we need to calculate it here. 468*7c478bd9Sstevel@tonic-gate */ 469*7c478bd9Sstevel@tonic-gate real_mode_platter->rm_longmode64_addr = rm_platter_pa + 470*7c478bd9Sstevel@tonic-gate ((uint32_t)long_mode_64 - (uint32_t)real_mode_start); 471*7c478bd9Sstevel@tonic-gate #endif /* __amd64 */ 472*7c478bd9Sstevel@tonic-gate 473*7c478bd9Sstevel@tonic-gate #ifdef TRAPTRACE 474*7c478bd9Sstevel@tonic-gate /* 475*7c478bd9Sstevel@tonic-gate * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers for this 476*7c478bd9Sstevel@tonic-gate * CPU. 477*7c478bd9Sstevel@tonic-gate */ 478*7c478bd9Sstevel@tonic-gate ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP); 479*7c478bd9Sstevel@tonic-gate ttc->ttc_next = ttc->ttc_first; 480*7c478bd9Sstevel@tonic-gate ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize; 481*7c478bd9Sstevel@tonic-gate #endif 482*7c478bd9Sstevel@tonic-gate 483*7c478bd9Sstevel@tonic-gate /* 484*7c478bd9Sstevel@tonic-gate * Record that we have another CPU. 
485*7c478bd9Sstevel@tonic-gate */ 486*7c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 487*7c478bd9Sstevel@tonic-gate /* 488*7c478bd9Sstevel@tonic-gate * Initialize the interrupt threads for this CPU 489*7c478bd9Sstevel@tonic-gate */ 490*7c478bd9Sstevel@tonic-gate init_intr_threads(cp); 491*7c478bd9Sstevel@tonic-gate /* 492*7c478bd9Sstevel@tonic-gate * Add CPU to list of available CPUs. It'll be on the active list 493*7c478bd9Sstevel@tonic-gate * after mp_startup(). 494*7c478bd9Sstevel@tonic-gate */ 495*7c478bd9Sstevel@tonic-gate cpu_add_unit(cp); 496*7c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 497*7c478bd9Sstevel@tonic-gate } 498*7c478bd9Sstevel@tonic-gate 499*7c478bd9Sstevel@tonic-gate /* 500*7c478bd9Sstevel@tonic-gate * Apply workarounds for known errata, and warn about those that are absent. 501*7c478bd9Sstevel@tonic-gate * 502*7c478bd9Sstevel@tonic-gate * System vendors occasionally create configurations which contain different 503*7c478bd9Sstevel@tonic-gate * revisions of the CPUs that are almost but not exactly the same. At the 504*7c478bd9Sstevel@tonic-gate * time of writing, this meant that their clock rates were the same, their 505*7c478bd9Sstevel@tonic-gate * feature sets were the same, but the required workaround were -not- 506*7c478bd9Sstevel@tonic-gate * necessarily the same. So, this routine is invoked on -every- CPU soon 507*7c478bd9Sstevel@tonic-gate * after starting to make sure that the resulting system contains the most 508*7c478bd9Sstevel@tonic-gate * pessimal set of workarounds needed to cope with *any* of the CPUs in the 509*7c478bd9Sstevel@tonic-gate * system. 510*7c478bd9Sstevel@tonic-gate * 511*7c478bd9Sstevel@tonic-gate * These workarounds are based on Rev 3.50 of the Revision Guide for 512*7c478bd9Sstevel@tonic-gate * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, May 2005. 
 */

/*
 * Per-erratum flags: each counts (non-zero) when at least one CPU in the
 * system exhibits the corresponding Opteron erratum, so other subsystems
 * (e.g. trap.c) can apply their half of the workaround.
 */
#if defined(OPTERON_ERRATUM_91)
int opteron_erratum_91;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_93)
int opteron_erratum_93;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_100)
int opteron_erratum_100;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_109)
int opteron_erratum_109;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_121)
int opteron_erratum_121;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_122)
int opteron_erratum_122;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_123)
int opteron_erratum_123;	/* if non-zero -> at least one cpu has it */
#endif


/* Warn on the console that a needed erratum workaround was not built in. */
#define	WARNING(cpu, n)						\
	cmn_err(CE_WARN, "cpu%d: no workaround for erratum %d",	\
	    (cpu)->cpu_id, (n))

uint_t
549*7c478bd9Sstevel@tonic-gate workaround_errata(struct cpu *cpu) 550*7c478bd9Sstevel@tonic-gate { 551*7c478bd9Sstevel@tonic-gate uint_t missing = 0; 552*7c478bd9Sstevel@tonic-gate 553*7c478bd9Sstevel@tonic-gate ASSERT(cpu == CPU); 554*7c478bd9Sstevel@tonic-gate 555*7c478bd9Sstevel@tonic-gate /*LINTED*/ 556*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 88) > 0) { 557*7c478bd9Sstevel@tonic-gate /* 558*7c478bd9Sstevel@tonic-gate * SWAPGS May Fail To Read Correct GS Base 559*7c478bd9Sstevel@tonic-gate */ 560*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_88) 561*7c478bd9Sstevel@tonic-gate /* 562*7c478bd9Sstevel@tonic-gate * The workaround is an mfence in the relevant assembler code 563*7c478bd9Sstevel@tonic-gate */ 564*7c478bd9Sstevel@tonic-gate #else 565*7c478bd9Sstevel@tonic-gate WARNING(cpu, 88); 566*7c478bd9Sstevel@tonic-gate missing++; 567*7c478bd9Sstevel@tonic-gate #endif 568*7c478bd9Sstevel@tonic-gate } 569*7c478bd9Sstevel@tonic-gate 570*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 91) > 0) { 571*7c478bd9Sstevel@tonic-gate /* 572*7c478bd9Sstevel@tonic-gate * Software Prefetches May Report A Page Fault 573*7c478bd9Sstevel@tonic-gate */ 574*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91) 575*7c478bd9Sstevel@tonic-gate /* 576*7c478bd9Sstevel@tonic-gate * fix is in trap.c 577*7c478bd9Sstevel@tonic-gate */ 578*7c478bd9Sstevel@tonic-gate opteron_erratum_91++; 579*7c478bd9Sstevel@tonic-gate #else 580*7c478bd9Sstevel@tonic-gate WARNING(cpu, 91); 581*7c478bd9Sstevel@tonic-gate missing++; 582*7c478bd9Sstevel@tonic-gate #endif 583*7c478bd9Sstevel@tonic-gate } 584*7c478bd9Sstevel@tonic-gate 585*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 93) > 0) { 586*7c478bd9Sstevel@tonic-gate /* 587*7c478bd9Sstevel@tonic-gate * RSM Auto-Halt Restart Returns to Incorrect RIP 588*7c478bd9Sstevel@tonic-gate */ 589*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93) 590*7c478bd9Sstevel@tonic-gate /* 
591*7c478bd9Sstevel@tonic-gate * fix is in trap.c 592*7c478bd9Sstevel@tonic-gate */ 593*7c478bd9Sstevel@tonic-gate opteron_erratum_93++; 594*7c478bd9Sstevel@tonic-gate #else 595*7c478bd9Sstevel@tonic-gate WARNING(cpu, 93); 596*7c478bd9Sstevel@tonic-gate missing++; 597*7c478bd9Sstevel@tonic-gate #endif 598*7c478bd9Sstevel@tonic-gate } 599*7c478bd9Sstevel@tonic-gate 600*7c478bd9Sstevel@tonic-gate /*LINTED*/ 601*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 95) > 0) { 602*7c478bd9Sstevel@tonic-gate /* 603*7c478bd9Sstevel@tonic-gate * RET Instruction May Return to Incorrect EIP 604*7c478bd9Sstevel@tonic-gate */ 605*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_95) 606*7c478bd9Sstevel@tonic-gate #if defined(_LP64) 607*7c478bd9Sstevel@tonic-gate /* 608*7c478bd9Sstevel@tonic-gate * Workaround this by ensuring that 32-bit user code and 609*7c478bd9Sstevel@tonic-gate * 64-bit kernel code never occupy the same address 610*7c478bd9Sstevel@tonic-gate * range mod 4G. 611*7c478bd9Sstevel@tonic-gate */ 612*7c478bd9Sstevel@tonic-gate if (_userlimit32 > 0xc0000000ul) 613*7c478bd9Sstevel@tonic-gate *(uintptr_t *)&_userlimit32 = 0xc0000000ul; 614*7c478bd9Sstevel@tonic-gate 615*7c478bd9Sstevel@tonic-gate /*LINTED*/ 616*7c478bd9Sstevel@tonic-gate ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u); 617*7c478bd9Sstevel@tonic-gate #endif /* _LP64 */ 618*7c478bd9Sstevel@tonic-gate #else 619*7c478bd9Sstevel@tonic-gate WARNING(cpu, 95); 620*7c478bd9Sstevel@tonic-gate missing++; 621*7c478bd9Sstevel@tonic-gate #endif /* OPTERON_ERRATUM_95 */ 622*7c478bd9Sstevel@tonic-gate } 623*7c478bd9Sstevel@tonic-gate 624*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 100) > 0) { 625*7c478bd9Sstevel@tonic-gate /* 626*7c478bd9Sstevel@tonic-gate * Compatibility Mode Branches Transfer to Illegal Address 627*7c478bd9Sstevel@tonic-gate */ 628*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_100) 629*7c478bd9Sstevel@tonic-gate /* 630*7c478bd9Sstevel@tonic-gate * fix is in trap.c 
631*7c478bd9Sstevel@tonic-gate */ 632*7c478bd9Sstevel@tonic-gate opteron_erratum_100++; 633*7c478bd9Sstevel@tonic-gate #else 634*7c478bd9Sstevel@tonic-gate WARNING(cpu, 100); 635*7c478bd9Sstevel@tonic-gate missing++; 636*7c478bd9Sstevel@tonic-gate #endif 637*7c478bd9Sstevel@tonic-gate } 638*7c478bd9Sstevel@tonic-gate 639*7c478bd9Sstevel@tonic-gate /*LINTED*/ 640*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 108) > 0) { 641*7c478bd9Sstevel@tonic-gate /* 642*7c478bd9Sstevel@tonic-gate * CPUID Instruction May Return Incorrect Model Number In 643*7c478bd9Sstevel@tonic-gate * Some Processors 644*7c478bd9Sstevel@tonic-gate */ 645*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_108) 646*7c478bd9Sstevel@tonic-gate /* 647*7c478bd9Sstevel@tonic-gate * (Our cpuid-handling code corrects the model number on 648*7c478bd9Sstevel@tonic-gate * those processors) 649*7c478bd9Sstevel@tonic-gate */ 650*7c478bd9Sstevel@tonic-gate #else 651*7c478bd9Sstevel@tonic-gate WARNING(cpu, 108); 652*7c478bd9Sstevel@tonic-gate missing++; 653*7c478bd9Sstevel@tonic-gate #endif 654*7c478bd9Sstevel@tonic-gate } 655*7c478bd9Sstevel@tonic-gate 656*7c478bd9Sstevel@tonic-gate /*LINTED*/ 657*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 109) > 0) { 658*7c478bd9Sstevel@tonic-gate /* 659*7c478bd9Sstevel@tonic-gate * Certain Reverse REP MOVS May Produce Unpredictable Behaviour 660*7c478bd9Sstevel@tonic-gate */ 661*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109) 662*7c478bd9Sstevel@tonic-gate uint64_t patchlevel; 663*7c478bd9Sstevel@tonic-gate 664*7c478bd9Sstevel@tonic-gate (void) rdmsr(MSR_AMD_PATCHLEVEL, &patchlevel); 665*7c478bd9Sstevel@tonic-gate /* workaround is to print a warning to upgrade BIOS */ 666*7c478bd9Sstevel@tonic-gate if (patchlevel == 0) 667*7c478bd9Sstevel@tonic-gate opteron_erratum_109++; 668*7c478bd9Sstevel@tonic-gate #else 669*7c478bd9Sstevel@tonic-gate WARNING(cpu, 109); 670*7c478bd9Sstevel@tonic-gate missing++; 671*7c478bd9Sstevel@tonic-gate 
#endif 672*7c478bd9Sstevel@tonic-gate } 673*7c478bd9Sstevel@tonic-gate /*LINTED*/ 674*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 121) > 0) { 675*7c478bd9Sstevel@tonic-gate /* 676*7c478bd9Sstevel@tonic-gate * Sequential Execution Across Non_Canonical Boundary Caused 677*7c478bd9Sstevel@tonic-gate * Processor Hang 678*7c478bd9Sstevel@tonic-gate */ 679*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121) 680*7c478bd9Sstevel@tonic-gate static int lma; 681*7c478bd9Sstevel@tonic-gate 682*7c478bd9Sstevel@tonic-gate if (opteron_erratum_121) 683*7c478bd9Sstevel@tonic-gate opteron_erratum_121++; 684*7c478bd9Sstevel@tonic-gate 685*7c478bd9Sstevel@tonic-gate /* 686*7c478bd9Sstevel@tonic-gate * Erratum 121 is only present in long (64 bit) mode. 687*7c478bd9Sstevel@tonic-gate * Workaround is to include the page immediately before the 688*7c478bd9Sstevel@tonic-gate * va hole to eliminate the possibility of system hangs due to 689*7c478bd9Sstevel@tonic-gate * sequential execution across the va hole boundary. 690*7c478bd9Sstevel@tonic-gate */ 691*7c478bd9Sstevel@tonic-gate if (lma == 0) { 692*7c478bd9Sstevel@tonic-gate uint64_t efer; 693*7c478bd9Sstevel@tonic-gate 694*7c478bd9Sstevel@tonic-gate /* 695*7c478bd9Sstevel@tonic-gate * check LMA once: assume all cpus are in long mode 696*7c478bd9Sstevel@tonic-gate * or not. 697*7c478bd9Sstevel@tonic-gate */ 698*7c478bd9Sstevel@tonic-gate lma = 1; 699*7c478bd9Sstevel@tonic-gate 700*7c478bd9Sstevel@tonic-gate (void) rdmsr(MSR_AMD_EFER, &efer); 701*7c478bd9Sstevel@tonic-gate if (efer & AMD_EFER_LMA) { 702*7c478bd9Sstevel@tonic-gate if (hole_start) { 703*7c478bd9Sstevel@tonic-gate hole_start -= PAGESIZE; 704*7c478bd9Sstevel@tonic-gate } else { 705*7c478bd9Sstevel@tonic-gate /* 706*7c478bd9Sstevel@tonic-gate * hole_start not yet initialized by 707*7c478bd9Sstevel@tonic-gate * mmu_init. Initialize hole_start 708*7c478bd9Sstevel@tonic-gate * with value to be subtracted. 
709*7c478bd9Sstevel@tonic-gate */ 710*7c478bd9Sstevel@tonic-gate hole_start = PAGESIZE; 711*7c478bd9Sstevel@tonic-gate } 712*7c478bd9Sstevel@tonic-gate opteron_erratum_121++; 713*7c478bd9Sstevel@tonic-gate } 714*7c478bd9Sstevel@tonic-gate } 715*7c478bd9Sstevel@tonic-gate #else 716*7c478bd9Sstevel@tonic-gate WARNING(cpu, 121); 717*7c478bd9Sstevel@tonic-gate missing++; 718*7c478bd9Sstevel@tonic-gate #endif 719*7c478bd9Sstevel@tonic-gate } 720*7c478bd9Sstevel@tonic-gate 721*7c478bd9Sstevel@tonic-gate /*LINTED*/ 722*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 122) > 0) { 723*7c478bd9Sstevel@tonic-gate /* 724*7c478bd9Sstevel@tonic-gate * TLB Flush Filter May Cause Cohenrency Problem in 725*7c478bd9Sstevel@tonic-gate * Multiprocessor Systems 726*7c478bd9Sstevel@tonic-gate */ 727*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_122) 728*7c478bd9Sstevel@tonic-gate /* 729*7c478bd9Sstevel@tonic-gate * Erratum 122 is only present in MP configurations (multi-core 730*7c478bd9Sstevel@tonic-gate * or multi-processor). 
731*7c478bd9Sstevel@tonic-gate */ 732*7c478bd9Sstevel@tonic-gate 733*7c478bd9Sstevel@tonic-gate if (opteron_erratum_122 || lgrp_plat_node_cnt > 1 || 734*7c478bd9Sstevel@tonic-gate cpuid_get_ncpu_per_chip(cpu) > 1) { 735*7c478bd9Sstevel@tonic-gate uint64_t hwcrval; 736*7c478bd9Sstevel@tonic-gate 737*7c478bd9Sstevel@tonic-gate /* disable TLB Flush Filter */ 738*7c478bd9Sstevel@tonic-gate (void) rdmsr(MSR_AMD_HWCR, &hwcrval); 739*7c478bd9Sstevel@tonic-gate hwcrval |= AMD_HWCR_FFDIS; 740*7c478bd9Sstevel@tonic-gate wrmsr(MSR_AMD_HWCR, &hwcrval); 741*7c478bd9Sstevel@tonic-gate opteron_erratum_122++; 742*7c478bd9Sstevel@tonic-gate } 743*7c478bd9Sstevel@tonic-gate 744*7c478bd9Sstevel@tonic-gate #else 745*7c478bd9Sstevel@tonic-gate WARNING(cpu, 122); 746*7c478bd9Sstevel@tonic-gate missing++; 747*7c478bd9Sstevel@tonic-gate #endif 748*7c478bd9Sstevel@tonic-gate } 749*7c478bd9Sstevel@tonic-gate /*LINTED*/ 750*7c478bd9Sstevel@tonic-gate if (cpuid_opteron_erratum(cpu, 123) > 0) { 751*7c478bd9Sstevel@tonic-gate /* 752*7c478bd9Sstevel@tonic-gate * Bypassed Reads May Cause Data Corruption of System Hang in 753*7c478bd9Sstevel@tonic-gate * Dual Core Processors 754*7c478bd9Sstevel@tonic-gate */ 755*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_123) 756*7c478bd9Sstevel@tonic-gate /* 757*7c478bd9Sstevel@tonic-gate * Erratum 123 applies only to multi-core cpus. 
758*7c478bd9Sstevel@tonic-gate */ 759*7c478bd9Sstevel@tonic-gate 760*7c478bd9Sstevel@tonic-gate if (cpuid_get_ncpu_per_chip(cpu) > 1) { 761*7c478bd9Sstevel@tonic-gate uint64_t patchlevel; 762*7c478bd9Sstevel@tonic-gate 763*7c478bd9Sstevel@tonic-gate (void) rdmsr(MSR_AMD_PATCHLEVEL, &patchlevel); 764*7c478bd9Sstevel@tonic-gate /* workaround is to print a warning to upgrade BIOS */ 765*7c478bd9Sstevel@tonic-gate if (patchlevel == 0) 766*7c478bd9Sstevel@tonic-gate opteron_erratum_123++; 767*7c478bd9Sstevel@tonic-gate } 768*7c478bd9Sstevel@tonic-gate #else 769*7c478bd9Sstevel@tonic-gate WARNING(cpu, 123); 770*7c478bd9Sstevel@tonic-gate missing++; 771*7c478bd9Sstevel@tonic-gate #endif 772*7c478bd9Sstevel@tonic-gate } 773*7c478bd9Sstevel@tonic-gate return (missing); 774*7c478bd9Sstevel@tonic-gate } 775*7c478bd9Sstevel@tonic-gate 776*7c478bd9Sstevel@tonic-gate void 777*7c478bd9Sstevel@tonic-gate workaround_errata_end() 778*7c478bd9Sstevel@tonic-gate { 779*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109) 780*7c478bd9Sstevel@tonic-gate if (opteron_erratum_109) { 781*7c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "!BIOS microcode patch for AMD Processor" 782*7c478bd9Sstevel@tonic-gate " Erratum 109 was not detected. Updating BIOS with the" 783*7c478bd9Sstevel@tonic-gate " microcode patch is highly recommended."); 784*7c478bd9Sstevel@tonic-gate } 785*7c478bd9Sstevel@tonic-gate #endif 786*7c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_123) 787*7c478bd9Sstevel@tonic-gate if (opteron_erratum_123) { 788*7c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "!BIOS microcode patch for AMD Processor" 789*7c478bd9Sstevel@tonic-gate " Erratum 123 was not detected. 
Updating BIOS with the" 790*7c478bd9Sstevel@tonic-gate " microcode patch is highly recommended."); 791*7c478bd9Sstevel@tonic-gate } 792*7c478bd9Sstevel@tonic-gate #endif 793*7c478bd9Sstevel@tonic-gate } 794*7c478bd9Sstevel@tonic-gate 795*7c478bd9Sstevel@tonic-gate static ushort_t *mp_map_warm_reset_vector(); 796*7c478bd9Sstevel@tonic-gate static void mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector); 797*7c478bd9Sstevel@tonic-gate 798*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 799*7c478bd9Sstevel@tonic-gate void 800*7c478bd9Sstevel@tonic-gate start_other_cpus(int cprboot) 801*7c478bd9Sstevel@tonic-gate { 802*7c478bd9Sstevel@tonic-gate unsigned who; 803*7c478bd9Sstevel@tonic-gate int cpuid = getbootcpuid(); 804*7c478bd9Sstevel@tonic-gate int delays = 0; 805*7c478bd9Sstevel@tonic-gate int started_cpu; 806*7c478bd9Sstevel@tonic-gate ushort_t *warm_reset_vector = NULL; 807*7c478bd9Sstevel@tonic-gate extern int procset; 808*7c478bd9Sstevel@tonic-gate 809*7c478bd9Sstevel@tonic-gate /* 810*7c478bd9Sstevel@tonic-gate * Initialize our own cpu_info. 
811*7c478bd9Sstevel@tonic-gate */ 812*7c478bd9Sstevel@tonic-gate init_cpu_info(CPU); 813*7c478bd9Sstevel@tonic-gate 814*7c478bd9Sstevel@tonic-gate /* 815*7c478bd9Sstevel@tonic-gate * Initialize our syscall handlers 816*7c478bd9Sstevel@tonic-gate */ 817*7c478bd9Sstevel@tonic-gate init_cpu_syscall(CPU); 818*7c478bd9Sstevel@tonic-gate 819*7c478bd9Sstevel@tonic-gate /* 820*7c478bd9Sstevel@tonic-gate * if only 1 cpu or not using MP, skip the rest of this 821*7c478bd9Sstevel@tonic-gate */ 822*7c478bd9Sstevel@tonic-gate if (!(mp_cpus & ~(1 << cpuid)) || use_mp == 0) { 823*7c478bd9Sstevel@tonic-gate if (use_mp == 0) 824*7c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "?***** Not in MP mode\n"); 825*7c478bd9Sstevel@tonic-gate goto done; 826*7c478bd9Sstevel@tonic-gate } 827*7c478bd9Sstevel@tonic-gate 828*7c478bd9Sstevel@tonic-gate /* 829*7c478bd9Sstevel@tonic-gate * perform such initialization as is needed 830*7c478bd9Sstevel@tonic-gate * to be able to take CPUs on- and off-line. 831*7c478bd9Sstevel@tonic-gate */ 832*7c478bd9Sstevel@tonic-gate cpu_pause_init(); 833*7c478bd9Sstevel@tonic-gate 834*7c478bd9Sstevel@tonic-gate xc_init(); /* initialize processor crosscalls */ 835*7c478bd9Sstevel@tonic-gate 836*7c478bd9Sstevel@tonic-gate /* 837*7c478bd9Sstevel@tonic-gate * Copy the real mode code at "real_mode_start" to the 838*7c478bd9Sstevel@tonic-gate * page at rm_platter_va. 
839*7c478bd9Sstevel@tonic-gate */ 840*7c478bd9Sstevel@tonic-gate warm_reset_vector = mp_map_warm_reset_vector(); 841*7c478bd9Sstevel@tonic-gate if (warm_reset_vector == NULL) 842*7c478bd9Sstevel@tonic-gate goto done; 843*7c478bd9Sstevel@tonic-gate 844*7c478bd9Sstevel@tonic-gate bcopy((caddr_t)real_mode_start, 845*7c478bd9Sstevel@tonic-gate (caddr_t)((rm_platter_t *)rm_platter_va)->rm_code, 846*7c478bd9Sstevel@tonic-gate (size_t)real_mode_end - (size_t)real_mode_start); 847*7c478bd9Sstevel@tonic-gate 848*7c478bd9Sstevel@tonic-gate flushes_require_xcalls = 1; 849*7c478bd9Sstevel@tonic-gate 850*7c478bd9Sstevel@tonic-gate affinity_set(CPU_CURRENT); 851*7c478bd9Sstevel@tonic-gate 852*7c478bd9Sstevel@tonic-gate for (who = 0; who < NCPU; who++) { 853*7c478bd9Sstevel@tonic-gate if (who == cpuid) 854*7c478bd9Sstevel@tonic-gate continue; 855*7c478bd9Sstevel@tonic-gate 856*7c478bd9Sstevel@tonic-gate if ((mp_cpus & (1 << who)) == 0) 857*7c478bd9Sstevel@tonic-gate continue; 858*7c478bd9Sstevel@tonic-gate 859*7c478bd9Sstevel@tonic-gate mp_startup_init(who); 860*7c478bd9Sstevel@tonic-gate started_cpu = 1; 861*7c478bd9Sstevel@tonic-gate (*cpu_startf)(who, rm_platter_pa); 862*7c478bd9Sstevel@tonic-gate 863*7c478bd9Sstevel@tonic-gate while ((procset & (1 << who)) == 0) { 864*7c478bd9Sstevel@tonic-gate 865*7c478bd9Sstevel@tonic-gate delay(1); 866*7c478bd9Sstevel@tonic-gate if (++delays > (20 * hz)) { 867*7c478bd9Sstevel@tonic-gate 868*7c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, 869*7c478bd9Sstevel@tonic-gate "cpu%d failed to start", who); 870*7c478bd9Sstevel@tonic-gate 871*7c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 872*7c478bd9Sstevel@tonic-gate cpu[who]->cpu_flags = 0; 873*7c478bd9Sstevel@tonic-gate cpu_del_unit(who); 874*7c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 875*7c478bd9Sstevel@tonic-gate 876*7c478bd9Sstevel@tonic-gate started_cpu = 0; 877*7c478bd9Sstevel@tonic-gate break; 878*7c478bd9Sstevel@tonic-gate } 879*7c478bd9Sstevel@tonic-gate } 
880*7c478bd9Sstevel@tonic-gate if (!started_cpu) 881*7c478bd9Sstevel@tonic-gate continue; 882*7c478bd9Sstevel@tonic-gate if (tsc_gethrtime_enable) 883*7c478bd9Sstevel@tonic-gate tsc_sync_master(who); 884*7c478bd9Sstevel@tonic-gate 885*7c478bd9Sstevel@tonic-gate 886*7c478bd9Sstevel@tonic-gate if (dtrace_cpu_init != NULL) { 887*7c478bd9Sstevel@tonic-gate /* 888*7c478bd9Sstevel@tonic-gate * DTrace CPU initialization expects cpu_lock 889*7c478bd9Sstevel@tonic-gate * to be held. 890*7c478bd9Sstevel@tonic-gate */ 891*7c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 892*7c478bd9Sstevel@tonic-gate (*dtrace_cpu_init)(who); 893*7c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 894*7c478bd9Sstevel@tonic-gate } 895*7c478bd9Sstevel@tonic-gate } 896*7c478bd9Sstevel@tonic-gate 897*7c478bd9Sstevel@tonic-gate affinity_clear(); 898*7c478bd9Sstevel@tonic-gate 899*7c478bd9Sstevel@tonic-gate for (who = 0; who < NCPU; who++) { 900*7c478bd9Sstevel@tonic-gate if (who == cpuid) 901*7c478bd9Sstevel@tonic-gate continue; 902*7c478bd9Sstevel@tonic-gate 903*7c478bd9Sstevel@tonic-gate if (!(procset & (1 << who))) 904*7c478bd9Sstevel@tonic-gate continue; 905*7c478bd9Sstevel@tonic-gate 906*7c478bd9Sstevel@tonic-gate while (!(cpu_ready_set & (1 << who))) 907*7c478bd9Sstevel@tonic-gate delay(1); 908*7c478bd9Sstevel@tonic-gate } 909*7c478bd9Sstevel@tonic-gate 910*7c478bd9Sstevel@tonic-gate done: 911*7c478bd9Sstevel@tonic-gate workaround_errata_end(); 912*7c478bd9Sstevel@tonic-gate 913*7c478bd9Sstevel@tonic-gate if (warm_reset_vector != NULL) 914*7c478bd9Sstevel@tonic-gate mp_unmap_warm_reset_vector(warm_reset_vector); 915*7c478bd9Sstevel@tonic-gate hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE, 916*7c478bd9Sstevel@tonic-gate HAT_UNLOAD); 917*7c478bd9Sstevel@tonic-gate } 918*7c478bd9Sstevel@tonic-gate 919*7c478bd9Sstevel@tonic-gate /* 920*7c478bd9Sstevel@tonic-gate * Dummy functions - no i86pc platforms support dynamic cpu allocation. 
921*7c478bd9Sstevel@tonic-gate */ 922*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 923*7c478bd9Sstevel@tonic-gate int 924*7c478bd9Sstevel@tonic-gate mp_cpu_configure(int cpuid) 925*7c478bd9Sstevel@tonic-gate { 926*7c478bd9Sstevel@tonic-gate return (ENOTSUP); /* not supported */ 927*7c478bd9Sstevel@tonic-gate } 928*7c478bd9Sstevel@tonic-gate 929*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 930*7c478bd9Sstevel@tonic-gate int 931*7c478bd9Sstevel@tonic-gate mp_cpu_unconfigure(int cpuid) 932*7c478bd9Sstevel@tonic-gate { 933*7c478bd9Sstevel@tonic-gate return (ENOTSUP); /* not supported */ 934*7c478bd9Sstevel@tonic-gate } 935*7c478bd9Sstevel@tonic-gate 936*7c478bd9Sstevel@tonic-gate /* 937*7c478bd9Sstevel@tonic-gate * Startup function for 'other' CPUs (besides boot cpu). 938*7c478bd9Sstevel@tonic-gate * Resumed from cpu_startup. 939*7c478bd9Sstevel@tonic-gate */ 940*7c478bd9Sstevel@tonic-gate void 941*7c478bd9Sstevel@tonic-gate mp_startup(void) 942*7c478bd9Sstevel@tonic-gate { 943*7c478bd9Sstevel@tonic-gate struct cpu *cp = CPU; 944*7c478bd9Sstevel@tonic-gate extern int procset; 945*7c478bd9Sstevel@tonic-gate uint_t new_x86_feature; 946*7c478bd9Sstevel@tonic-gate 947*7c478bd9Sstevel@tonic-gate new_x86_feature = cpuid_pass1(cp); 948*7c478bd9Sstevel@tonic-gate 949*7c478bd9Sstevel@tonic-gate /* 950*7c478bd9Sstevel@tonic-gate * We need to Sync MTRR with cpu0's MTRR. We have to do 951*7c478bd9Sstevel@tonic-gate * this with interrupts disabled. 
952*7c478bd9Sstevel@tonic-gate */ 953*7c478bd9Sstevel@tonic-gate if (x86_feature & X86_MTRR) 954*7c478bd9Sstevel@tonic-gate mtrr_sync(); 955*7c478bd9Sstevel@tonic-gate /* 956*7c478bd9Sstevel@tonic-gate * Enable machine check architecture 957*7c478bd9Sstevel@tonic-gate */ 958*7c478bd9Sstevel@tonic-gate if (x86_feature & X86_MCA) 959*7c478bd9Sstevel@tonic-gate setup_mca(); 960*7c478bd9Sstevel@tonic-gate 961*7c478bd9Sstevel@tonic-gate /* 962*7c478bd9Sstevel@tonic-gate * Initialize this CPU's syscall handlers 963*7c478bd9Sstevel@tonic-gate */ 964*7c478bd9Sstevel@tonic-gate init_cpu_syscall(cp); 965*7c478bd9Sstevel@tonic-gate 966*7c478bd9Sstevel@tonic-gate /* 967*7c478bd9Sstevel@tonic-gate * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the 968*7c478bd9Sstevel@tonic-gate * highest level at which a routine is permitted to block on 969*7c478bd9Sstevel@tonic-gate * an adaptive mutex (allows for cpu poke interrupt in case 970*7c478bd9Sstevel@tonic-gate * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks 971*7c478bd9Sstevel@tonic-gate * device interrupts that may end up in the hat layer issuing cross 972*7c478bd9Sstevel@tonic-gate * calls before CPU_READY is set. 973*7c478bd9Sstevel@tonic-gate */ 974*7c478bd9Sstevel@tonic-gate (void) splx(ipltospl(LOCK_LEVEL)); 975*7c478bd9Sstevel@tonic-gate 976*7c478bd9Sstevel@tonic-gate /* 977*7c478bd9Sstevel@tonic-gate * Do a sanity check to make sure this new CPU is a sane thing 978*7c478bd9Sstevel@tonic-gate * to add to the collection of processors running this system. 979*7c478bd9Sstevel@tonic-gate * 980*7c478bd9Sstevel@tonic-gate * XXX Clearly this needs to get more sophisticated, if x86 981*7c478bd9Sstevel@tonic-gate * systems start to get built out of heterogenous CPUs; as is 982*7c478bd9Sstevel@tonic-gate * likely to happen once the number of processors in a configuration 983*7c478bd9Sstevel@tonic-gate * gets large enough. 
984*7c478bd9Sstevel@tonic-gate */ 985*7c478bd9Sstevel@tonic-gate if ((x86_feature & new_x86_feature) != x86_feature) { 986*7c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "?cpu%d: %b\n", 987*7c478bd9Sstevel@tonic-gate cp->cpu_id, new_x86_feature, FMT_X86_FEATURE); 988*7c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id); 989*7c478bd9Sstevel@tonic-gate } 990*7c478bd9Sstevel@tonic-gate 991*7c478bd9Sstevel@tonic-gate /* 992*7c478bd9Sstevel@tonic-gate * We could be more sophisticated here, and just mark the CPU 993*7c478bd9Sstevel@tonic-gate * as "faulted" but at this point we'll opt for the easier 994*7c478bd9Sstevel@tonic-gate * answer of dieing horribly. Provided the boot cpu is ok, 995*7c478bd9Sstevel@tonic-gate * the system can be recovered by booting with use_mp set to zero. 996*7c478bd9Sstevel@tonic-gate */ 997*7c478bd9Sstevel@tonic-gate if (workaround_errata(cp) != 0) 998*7c478bd9Sstevel@tonic-gate panic("critical workaround(s) missing for cpu%d", cp->cpu_id); 999*7c478bd9Sstevel@tonic-gate 1000*7c478bd9Sstevel@tonic-gate cpuid_pass2(cp); 1001*7c478bd9Sstevel@tonic-gate cpuid_pass3(cp); 1002*7c478bd9Sstevel@tonic-gate (void) cpuid_pass4(cp); 1003*7c478bd9Sstevel@tonic-gate 1004*7c478bd9Sstevel@tonic-gate init_cpu_info(cp); 1005*7c478bd9Sstevel@tonic-gate 1006*7c478bd9Sstevel@tonic-gate add_cpunode2devtree(cp->cpu_id, cp->cpu_m.mcpu_cpi); 1007*7c478bd9Sstevel@tonic-gate 1008*7c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 1009*7c478bd9Sstevel@tonic-gate procset |= 1 << cp->cpu_id; 1010*7c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 1011*7c478bd9Sstevel@tonic-gate 1012*7c478bd9Sstevel@tonic-gate if (tsc_gethrtime_enable) 1013*7c478bd9Sstevel@tonic-gate tsc_sync_slave(); 1014*7c478bd9Sstevel@tonic-gate 1015*7c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 1016*7c478bd9Sstevel@tonic-gate /* 1017*7c478bd9Sstevel@tonic-gate * It's unfortunate that chip_cpu_init() has to be called here. 
1018*7c478bd9Sstevel@tonic-gate * It really belongs in cpu_add_unit(), but unfortunately it is 1019*7c478bd9Sstevel@tonic-gate * dependent on the cpuid probing, which must be done in the 1020*7c478bd9Sstevel@tonic-gate * context of the current CPU. Care must be taken on x86 to ensure 1021*7c478bd9Sstevel@tonic-gate * that mp_startup can safely block even though chip_cpu_init() and 1022*7c478bd9Sstevel@tonic-gate * cpu_add_active() have not yet been called. 1023*7c478bd9Sstevel@tonic-gate */ 1024*7c478bd9Sstevel@tonic-gate chip_cpu_init(cp); 1025*7c478bd9Sstevel@tonic-gate chip_cpu_startup(cp); 1026*7c478bd9Sstevel@tonic-gate 1027*7c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_ENABLE | CPU_EXISTS; 1028*7c478bd9Sstevel@tonic-gate cpu_add_active(cp); 1029*7c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 1030*7c478bd9Sstevel@tonic-gate 1031*7c478bd9Sstevel@tonic-gate (void) spl0(); /* enable interrupts */ 1032*7c478bd9Sstevel@tonic-gate 1033*7c478bd9Sstevel@tonic-gate if (boothowto & RB_DEBUG) 1034*7c478bd9Sstevel@tonic-gate kdi_dvec_cpu_init(cp); 1035*7c478bd9Sstevel@tonic-gate 1036*7c478bd9Sstevel@tonic-gate /* 1037*7c478bd9Sstevel@tonic-gate * Setting the bit in cpu_ready_set must be the last operation in 1038*7c478bd9Sstevel@tonic-gate * processor initialization; the boot CPU will continue to boot once 1039*7c478bd9Sstevel@tonic-gate * it sees this bit set for all active CPUs. 1040*7c478bd9Sstevel@tonic-gate */ 1041*7c478bd9Sstevel@tonic-gate CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id); 1042*7c478bd9Sstevel@tonic-gate 1043*7c478bd9Sstevel@tonic-gate /* 1044*7c478bd9Sstevel@tonic-gate * Because mp_startup() gets fired off after init() starts, we 1045*7c478bd9Sstevel@tonic-gate * can't use the '?' trick to do 'boot -v' printing - so we 1046*7c478bd9Sstevel@tonic-gate * always direct the 'cpu .. online' messages to the log. 
1047*7c478bd9Sstevel@tonic-gate */ 1048*7c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "!cpu%d initialization complete - online\n", 1049*7c478bd9Sstevel@tonic-gate cp->cpu_id); 1050*7c478bd9Sstevel@tonic-gate 1051*7c478bd9Sstevel@tonic-gate /* 1052*7c478bd9Sstevel@tonic-gate * Now we are done with the startup thread, so free it up. 1053*7c478bd9Sstevel@tonic-gate */ 1054*7c478bd9Sstevel@tonic-gate thread_exit(); 1055*7c478bd9Sstevel@tonic-gate panic("mp_startup: cannot return"); 1056*7c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 1057*7c478bd9Sstevel@tonic-gate } 1058*7c478bd9Sstevel@tonic-gate 1059*7c478bd9Sstevel@tonic-gate 1060*7c478bd9Sstevel@tonic-gate /* 1061*7c478bd9Sstevel@tonic-gate * Start CPU on user request. 1062*7c478bd9Sstevel@tonic-gate */ 1063*7c478bd9Sstevel@tonic-gate /* ARGSUSED */ 1064*7c478bd9Sstevel@tonic-gate int 1065*7c478bd9Sstevel@tonic-gate mp_cpu_start(struct cpu *cp) 1066*7c478bd9Sstevel@tonic-gate { 1067*7c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 1068*7c478bd9Sstevel@tonic-gate if (cp->cpu_id == getbootcpuid()) 1069*7c478bd9Sstevel@tonic-gate return (EBUSY); /* Cannot start boot CPU */ 1070*7c478bd9Sstevel@tonic-gate return (0); 1071*7c478bd9Sstevel@tonic-gate } 1072*7c478bd9Sstevel@tonic-gate 1073*7c478bd9Sstevel@tonic-gate /* 1074*7c478bd9Sstevel@tonic-gate * Stop CPU on user request. 1075*7c478bd9Sstevel@tonic-gate */ 1076*7c478bd9Sstevel@tonic-gate /* ARGSUSED */ 1077*7c478bd9Sstevel@tonic-gate int 1078*7c478bd9Sstevel@tonic-gate mp_cpu_stop(struct cpu *cp) 1079*7c478bd9Sstevel@tonic-gate { 1080*7c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 1081*7c478bd9Sstevel@tonic-gate if (cp->cpu_id == getbootcpuid()) 1082*7c478bd9Sstevel@tonic-gate return (EBUSY); /* Cannot stop boot CPU */ 1083*7c478bd9Sstevel@tonic-gate 1084*7c478bd9Sstevel@tonic-gate return (0); 1085*7c478bd9Sstevel@tonic-gate } 1086*7c478bd9Sstevel@tonic-gate 1087*7c478bd9Sstevel@tonic-gate /* 1088*7c478bd9Sstevel@tonic-gate * Power on CPU. 
1089*7c478bd9Sstevel@tonic-gate */ 1090*7c478bd9Sstevel@tonic-gate /* ARGSUSED */ 1091*7c478bd9Sstevel@tonic-gate int 1092*7c478bd9Sstevel@tonic-gate mp_cpu_poweron(struct cpu *cp) 1093*7c478bd9Sstevel@tonic-gate { 1094*7c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 1095*7c478bd9Sstevel@tonic-gate return (ENOTSUP); /* not supported */ 1096*7c478bd9Sstevel@tonic-gate } 1097*7c478bd9Sstevel@tonic-gate 1098*7c478bd9Sstevel@tonic-gate /* 1099*7c478bd9Sstevel@tonic-gate * Power off CPU. 1100*7c478bd9Sstevel@tonic-gate */ 1101*7c478bd9Sstevel@tonic-gate /* ARGSUSED */ 1102*7c478bd9Sstevel@tonic-gate int 1103*7c478bd9Sstevel@tonic-gate mp_cpu_poweroff(struct cpu *cp) 1104*7c478bd9Sstevel@tonic-gate { 1105*7c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 1106*7c478bd9Sstevel@tonic-gate return (ENOTSUP); /* not supported */ 1107*7c478bd9Sstevel@tonic-gate } 1108*7c478bd9Sstevel@tonic-gate 1109*7c478bd9Sstevel@tonic-gate 1110*7c478bd9Sstevel@tonic-gate /* 1111*7c478bd9Sstevel@tonic-gate * Take the specified CPU out of participation in interrupts. 1112*7c478bd9Sstevel@tonic-gate */ 1113*7c478bd9Sstevel@tonic-gate int 1114*7c478bd9Sstevel@tonic-gate cpu_disable_intr(struct cpu *cp) 1115*7c478bd9Sstevel@tonic-gate { 1116*7c478bd9Sstevel@tonic-gate /* 1117*7c478bd9Sstevel@tonic-gate * cannot disable interrupts on boot cpu 1118*7c478bd9Sstevel@tonic-gate */ 1119*7c478bd9Sstevel@tonic-gate if (cp == cpu[getbootcpuid()]) 1120*7c478bd9Sstevel@tonic-gate return (EBUSY); 1121*7c478bd9Sstevel@tonic-gate 1122*7c478bd9Sstevel@tonic-gate if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS) 1123*7c478bd9Sstevel@tonic-gate return (EBUSY); 1124*7c478bd9Sstevel@tonic-gate 1125*7c478bd9Sstevel@tonic-gate cp->cpu_flags &= ~CPU_ENABLE; 1126*7c478bd9Sstevel@tonic-gate return (0); 1127*7c478bd9Sstevel@tonic-gate } 1128*7c478bd9Sstevel@tonic-gate 1129*7c478bd9Sstevel@tonic-gate /* 1130*7c478bd9Sstevel@tonic-gate * Allow the specified CPU to participate in interrupts. 
1131*7c478bd9Sstevel@tonic-gate */ 1132*7c478bd9Sstevel@tonic-gate void 1133*7c478bd9Sstevel@tonic-gate cpu_enable_intr(struct cpu *cp) 1134*7c478bd9Sstevel@tonic-gate { 1135*7c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 1136*7c478bd9Sstevel@tonic-gate if (cp == cpu[getbootcpuid()]) 1137*7c478bd9Sstevel@tonic-gate return; 1138*7c478bd9Sstevel@tonic-gate 1139*7c478bd9Sstevel@tonic-gate cp->cpu_flags |= CPU_ENABLE; 1140*7c478bd9Sstevel@tonic-gate psm_enable_intr(cp->cpu_id); 1141*7c478bd9Sstevel@tonic-gate } 1142*7c478bd9Sstevel@tonic-gate 1143*7c478bd9Sstevel@tonic-gate 1144*7c478bd9Sstevel@tonic-gate /* 1145*7c478bd9Sstevel@tonic-gate * return the cpu id of the initial startup cpu 1146*7c478bd9Sstevel@tonic-gate */ 1147*7c478bd9Sstevel@tonic-gate processorid_t 1148*7c478bd9Sstevel@tonic-gate getbootcpuid(void) 1149*7c478bd9Sstevel@tonic-gate { 1150*7c478bd9Sstevel@tonic-gate return (0); 1151*7c478bd9Sstevel@tonic-gate } 1152*7c478bd9Sstevel@tonic-gate 1153*7c478bd9Sstevel@tonic-gate static ushort_t * 1154*7c478bd9Sstevel@tonic-gate mp_map_warm_reset_vector() 1155*7c478bd9Sstevel@tonic-gate { 1156*7c478bd9Sstevel@tonic-gate ushort_t *warm_reset_vector; 1157*7c478bd9Sstevel@tonic-gate 1158*7c478bd9Sstevel@tonic-gate if (!(warm_reset_vector = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR, 1159*7c478bd9Sstevel@tonic-gate sizeof (ushort_t *), PROT_READ|PROT_WRITE))) 1160*7c478bd9Sstevel@tonic-gate return (NULL); 1161*7c478bd9Sstevel@tonic-gate 1162*7c478bd9Sstevel@tonic-gate /* 1163*7c478bd9Sstevel@tonic-gate * setup secondary cpu bios boot up vector 1164*7c478bd9Sstevel@tonic-gate */ 1165*7c478bd9Sstevel@tonic-gate *warm_reset_vector = (ushort_t)((caddr_t) 1166*7c478bd9Sstevel@tonic-gate ((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va 1167*7c478bd9Sstevel@tonic-gate + ((ulong_t)rm_platter_va & 0xf)); 1168*7c478bd9Sstevel@tonic-gate warm_reset_vector++; 1169*7c478bd9Sstevel@tonic-gate *warm_reset_vector = (ushort_t)(rm_platter_pa >> 4); 
1170*7c478bd9Sstevel@tonic-gate 1171*7c478bd9Sstevel@tonic-gate --warm_reset_vector; 1172*7c478bd9Sstevel@tonic-gate return (warm_reset_vector); 1173*7c478bd9Sstevel@tonic-gate } 1174*7c478bd9Sstevel@tonic-gate 1175*7c478bd9Sstevel@tonic-gate static void 1176*7c478bd9Sstevel@tonic-gate mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector) 1177*7c478bd9Sstevel@tonic-gate { 1178*7c478bd9Sstevel@tonic-gate psm_unmap_phys((caddr_t)warm_reset_vector, sizeof (ushort_t *)); 1179*7c478bd9Sstevel@tonic-gate } 1180*7c478bd9Sstevel@tonic-gate 1181*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1182*7c478bd9Sstevel@tonic-gate void 1183*7c478bd9Sstevel@tonic-gate mp_cpu_faulted_enter(struct cpu *cp) 1184*7c478bd9Sstevel@tonic-gate {} 1185*7c478bd9Sstevel@tonic-gate 1186*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1187*7c478bd9Sstevel@tonic-gate void 1188*7c478bd9Sstevel@tonic-gate mp_cpu_faulted_exit(struct cpu *cp) 1189*7c478bd9Sstevel@tonic-gate {} 1190*7c478bd9Sstevel@tonic-gate 1191*7c478bd9Sstevel@tonic-gate /* 1192*7c478bd9Sstevel@tonic-gate * The following two routines are used as context operators on threads belonging 1193*7c478bd9Sstevel@tonic-gate * to processes with a private LDT (see sysi86). Due to the rarity of such 1194*7c478bd9Sstevel@tonic-gate * processes, these routines are currently written for best code readability and 1195*7c478bd9Sstevel@tonic-gate * organization rather than speed. We could avoid checking x86_feature at every 1196*7c478bd9Sstevel@tonic-gate * context switch by installing different context ops, depending on the 1197*7c478bd9Sstevel@tonic-gate * x86_feature flags, at LDT creation time -- one for each combination of fast 1198*7c478bd9Sstevel@tonic-gate * syscall feature flags. 
1199*7c478bd9Sstevel@tonic-gate */ 1200*7c478bd9Sstevel@tonic-gate 1201*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1202*7c478bd9Sstevel@tonic-gate void 1203*7c478bd9Sstevel@tonic-gate cpu_fast_syscall_disable(void *arg) 1204*7c478bd9Sstevel@tonic-gate { 1205*7c478bd9Sstevel@tonic-gate if (x86_feature & X86_SEP) 1206*7c478bd9Sstevel@tonic-gate cpu_sep_disable(); 1207*7c478bd9Sstevel@tonic-gate if (x86_feature & X86_ASYSC) 1208*7c478bd9Sstevel@tonic-gate cpu_asysc_disable(); 1209*7c478bd9Sstevel@tonic-gate } 1210*7c478bd9Sstevel@tonic-gate 1211*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 1212*7c478bd9Sstevel@tonic-gate void 1213*7c478bd9Sstevel@tonic-gate cpu_fast_syscall_enable(void *arg) 1214*7c478bd9Sstevel@tonic-gate { 1215*7c478bd9Sstevel@tonic-gate if (x86_feature & X86_SEP) 1216*7c478bd9Sstevel@tonic-gate cpu_sep_enable(); 1217*7c478bd9Sstevel@tonic-gate if (x86_feature & X86_ASYSC) 1218*7c478bd9Sstevel@tonic-gate cpu_asysc_enable(); 1219*7c478bd9Sstevel@tonic-gate } 1220*7c478bd9Sstevel@tonic-gate 1221*7c478bd9Sstevel@tonic-gate static void 1222*7c478bd9Sstevel@tonic-gate cpu_sep_enable(void) 1223*7c478bd9Sstevel@tonic-gate { 1224*7c478bd9Sstevel@tonic-gate uint64_t value; 1225*7c478bd9Sstevel@tonic-gate 1226*7c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_SEP); 1227*7c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 1228*7c478bd9Sstevel@tonic-gate 1229*7c478bd9Sstevel@tonic-gate value = KCS_SEL; 1230*7c478bd9Sstevel@tonic-gate wrmsr(MSR_INTC_SEP_CS, &value); 1231*7c478bd9Sstevel@tonic-gate } 1232*7c478bd9Sstevel@tonic-gate 1233*7c478bd9Sstevel@tonic-gate static void 1234*7c478bd9Sstevel@tonic-gate cpu_sep_disable(void) 1235*7c478bd9Sstevel@tonic-gate { 1236*7c478bd9Sstevel@tonic-gate uint64_t value; 1237*7c478bd9Sstevel@tonic-gate 1238*7c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_SEP); 1239*7c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 1240*7c478bd9Sstevel@tonic-gate 
1241*7c478bd9Sstevel@tonic-gate /* 1242*7c478bd9Sstevel@tonic-gate * Setting the SYSENTER_CS_MSR register to 0 causes software executing 1243*7c478bd9Sstevel@tonic-gate * the sysenter or sysexit instruction to trigger a #gp fault. 1244*7c478bd9Sstevel@tonic-gate */ 1245*7c478bd9Sstevel@tonic-gate value = 0; 1246*7c478bd9Sstevel@tonic-gate wrmsr(MSR_INTC_SEP_CS, &value); 1247*7c478bd9Sstevel@tonic-gate } 1248*7c478bd9Sstevel@tonic-gate 1249*7c478bd9Sstevel@tonic-gate static void 1250*7c478bd9Sstevel@tonic-gate cpu_asysc_enable(void) 1251*7c478bd9Sstevel@tonic-gate { 1252*7c478bd9Sstevel@tonic-gate uint64_t value; 1253*7c478bd9Sstevel@tonic-gate 1254*7c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_ASYSC); 1255*7c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 1256*7c478bd9Sstevel@tonic-gate 1257*7c478bd9Sstevel@tonic-gate (void) rdmsr(MSR_AMD_EFER, &value); 1258*7c478bd9Sstevel@tonic-gate value |= AMD_EFER_SCE; 1259*7c478bd9Sstevel@tonic-gate wrmsr(MSR_AMD_EFER, &value); 1260*7c478bd9Sstevel@tonic-gate } 1261*7c478bd9Sstevel@tonic-gate 1262*7c478bd9Sstevel@tonic-gate static void 1263*7c478bd9Sstevel@tonic-gate cpu_asysc_disable(void) 1264*7c478bd9Sstevel@tonic-gate { 1265*7c478bd9Sstevel@tonic-gate uint64_t value; 1266*7c478bd9Sstevel@tonic-gate 1267*7c478bd9Sstevel@tonic-gate ASSERT(x86_feature & X86_ASYSC); 1268*7c478bd9Sstevel@tonic-gate ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL); 1269*7c478bd9Sstevel@tonic-gate 1270*7c478bd9Sstevel@tonic-gate /* 1271*7c478bd9Sstevel@tonic-gate * Turn off the SCE (syscall enable) bit in the EFER register. Software 1272*7c478bd9Sstevel@tonic-gate * executing syscall or sysret with this bit off will incur a #ud trap. 1273*7c478bd9Sstevel@tonic-gate */ 1274*7c478bd9Sstevel@tonic-gate (void) rdmsr(MSR_AMD_EFER, &value); 1275*7c478bd9Sstevel@tonic-gate value &= ~AMD_EFER_SCE; 1276*7c478bd9Sstevel@tonic-gate wrmsr(MSR_AMD_EFER, &value); 1277*7c478bd9Sstevel@tonic-gate } 1278