xref: /illumos-gate/usr/src/uts/i86pc/os/mp_startup.c (revision fb2f18f820d90b001aea4fb27dd654bc1263c440)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5100b72f4Sandrei  * Common Development and Distribution License (the "License").
6100b72f4Sandrei  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22*fb2f18f8Sesaxe  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate #include <sys/types.h>
297c478bd9Sstevel@tonic-gate #include <sys/thread.h>
307c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
317c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
327c478bd9Sstevel@tonic-gate #include <sys/param.h>
337c478bd9Sstevel@tonic-gate #include <sys/proc.h>
347c478bd9Sstevel@tonic-gate #include <sys/disp.h>
357c478bd9Sstevel@tonic-gate #include <sys/mmu.h>
367c478bd9Sstevel@tonic-gate #include <sys/class.h>
377c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
387c478bd9Sstevel@tonic-gate #include <sys/debug.h>
397c478bd9Sstevel@tonic-gate #include <sys/asm_linkage.h>
407c478bd9Sstevel@tonic-gate #include <sys/x_call.h>
417c478bd9Sstevel@tonic-gate #include <sys/systm.h>
427c478bd9Sstevel@tonic-gate #include <sys/var.h>
437c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
447c478bd9Sstevel@tonic-gate #include <vm/hat.h>
457c478bd9Sstevel@tonic-gate #include <sys/mmu.h>
467c478bd9Sstevel@tonic-gate #include <vm/as.h>
477c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
487c478bd9Sstevel@tonic-gate #include <sys/segments.h>
497c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
507c478bd9Sstevel@tonic-gate #include <sys/stack.h>
517c478bd9Sstevel@tonic-gate #include <sys/smp_impldefs.h>
527c478bd9Sstevel@tonic-gate #include <sys/x86_archext.h>
537c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
547c478bd9Sstevel@tonic-gate #include <sys/traptrace.h>
557c478bd9Sstevel@tonic-gate #include <sys/clock.h>
567c478bd9Sstevel@tonic-gate #include <sys/cpc_impl.h>
57*fb2f18f8Sesaxe #include <sys/pg.h>
58*fb2f18f8Sesaxe #include <sys/cmt.h>
597c478bd9Sstevel@tonic-gate #include <sys/dtrace.h>
607c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
617c478bd9Sstevel@tonic-gate #include <sys/fp.h>
627c478bd9Sstevel@tonic-gate #include <sys/reboot.h>
637c478bd9Sstevel@tonic-gate #include <sys/kdi.h>
647c478bd9Sstevel@tonic-gate #include <vm/hat_i86.h>
657c478bd9Sstevel@tonic-gate #include <sys/memnode.h>
66ef50d8c0Sesaxe #include <sys/pci_cfgspace.h>
677aec1d6eScindi #include <sys/cpu_module.h>
687c478bd9Sstevel@tonic-gate 
697c478bd9Sstevel@tonic-gate struct cpu	cpus[1];			/* CPU data */
707c478bd9Sstevel@tonic-gate struct cpu	*cpu[NCPU] = {&cpus[0]};	/* pointers to all CPUs */
717c478bd9Sstevel@tonic-gate cpu_core_t	cpu_core[NCPU];			/* cpu_core structures */
727c478bd9Sstevel@tonic-gate 
737c478bd9Sstevel@tonic-gate /*
747c478bd9Sstevel@tonic-gate  * Useful for disabling MP bring-up for an MP capable kernel
757c478bd9Sstevel@tonic-gate  * (a kernel that was built with MP defined)
767c478bd9Sstevel@tonic-gate  */
777c478bd9Sstevel@tonic-gate int use_mp = 1;
787c478bd9Sstevel@tonic-gate 
7941791439Sandrei /*
8041791439Sandrei  * To be set by a PSM to indicate what CPUs are available on the system.
8141791439Sandrei  */
8241791439Sandrei cpuset_t mp_cpus = 1;
837c478bd9Sstevel@tonic-gate 
847c478bd9Sstevel@tonic-gate /*
857c478bd9Sstevel@tonic-gate  * This variable is used by the hat layer to decide whether or not
867c478bd9Sstevel@tonic-gate  * critical sections are needed to prevent race conditions.  For sun4m,
877c478bd9Sstevel@tonic-gate  * this variable is set once enough MP initialization has been done in
887c478bd9Sstevel@tonic-gate  * order to allow cross calls.
897c478bd9Sstevel@tonic-gate  */
907c478bd9Sstevel@tonic-gate int flushes_require_xcalls = 0;
9141791439Sandrei cpuset_t	cpu_ready_set = 1;
927c478bd9Sstevel@tonic-gate 
937c478bd9Sstevel@tonic-gate extern	void	real_mode_start(void);
947c478bd9Sstevel@tonic-gate extern	void	real_mode_end(void);
957c478bd9Sstevel@tonic-gate static 	void	mp_startup(void);
967c478bd9Sstevel@tonic-gate 
977c478bd9Sstevel@tonic-gate static void cpu_sep_enable(void);
987c478bd9Sstevel@tonic-gate static void cpu_sep_disable(void);
997c478bd9Sstevel@tonic-gate static void cpu_asysc_enable(void);
1007c478bd9Sstevel@tonic-gate static void cpu_asysc_disable(void);
1017c478bd9Sstevel@tonic-gate 
1027c478bd9Sstevel@tonic-gate extern int tsc_gethrtime_enable;
1037c478bd9Sstevel@tonic-gate 
1047c478bd9Sstevel@tonic-gate /*
1057c478bd9Sstevel@tonic-gate  * Init CPU info - get CPU type info for processor_info system call.
1067c478bd9Sstevel@tonic-gate  */
1077c478bd9Sstevel@tonic-gate void
1087c478bd9Sstevel@tonic-gate init_cpu_info(struct cpu *cp)
1097c478bd9Sstevel@tonic-gate {
1107c478bd9Sstevel@tonic-gate 	processor_info_t *pi = &cp->cpu_type_info;
1117c478bd9Sstevel@tonic-gate 	char buf[CPU_IDSTRLEN];
1127c478bd9Sstevel@tonic-gate 
1137c478bd9Sstevel@tonic-gate 	/*
1147c478bd9Sstevel@tonic-gate 	 * Get clock-frequency property for the CPU.
1157c478bd9Sstevel@tonic-gate 	 */
1167c478bd9Sstevel@tonic-gate 	pi->pi_clock = cpu_freq;
1177c478bd9Sstevel@tonic-gate 
1187c478bd9Sstevel@tonic-gate 	(void) strcpy(pi->pi_processor_type, "i386");
1197c478bd9Sstevel@tonic-gate 	if (fpu_exists)
1207c478bd9Sstevel@tonic-gate 		(void) strcpy(pi->pi_fputypes, "i387 compatible");
1217c478bd9Sstevel@tonic-gate 
1227c478bd9Sstevel@tonic-gate 	(void) cpuid_getidstr(cp, buf, sizeof (buf));
1237c478bd9Sstevel@tonic-gate 
1247c478bd9Sstevel@tonic-gate 	cp->cpu_idstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
1257c478bd9Sstevel@tonic-gate 	(void) strcpy(cp->cpu_idstr, buf);
1267c478bd9Sstevel@tonic-gate 
1277c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);
1287c478bd9Sstevel@tonic-gate 
1297c478bd9Sstevel@tonic-gate 	(void) cpuid_getbrandstr(cp, buf, sizeof (buf));
1307c478bd9Sstevel@tonic-gate 	cp->cpu_brandstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
1317c478bd9Sstevel@tonic-gate 	(void) strcpy(cp->cpu_brandstr, buf);
1327c478bd9Sstevel@tonic-gate 
1337c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
1347c478bd9Sstevel@tonic-gate }
1357c478bd9Sstevel@tonic-gate 
1367c478bd9Sstevel@tonic-gate /*
1377c478bd9Sstevel@tonic-gate  * Configure syscall support on this CPU.
1387c478bd9Sstevel@tonic-gate  */
1397c478bd9Sstevel@tonic-gate /*ARGSUSED*/
1407c478bd9Sstevel@tonic-gate static void
1417c478bd9Sstevel@tonic-gate init_cpu_syscall(struct cpu *cp)
1427c478bd9Sstevel@tonic-gate {
1437c478bd9Sstevel@tonic-gate 	kpreempt_disable();
1447c478bd9Sstevel@tonic-gate 
1457c478bd9Sstevel@tonic-gate #if defined(__amd64)
1467c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_ASYSC) {
1477c478bd9Sstevel@tonic-gate 
1487c478bd9Sstevel@tonic-gate #if !defined(__lint)
1497c478bd9Sstevel@tonic-gate 		/*
1507c478bd9Sstevel@tonic-gate 		 * The syscall instruction imposes a certain ordering on
1517c478bd9Sstevel@tonic-gate 		 * segment selectors, so we double-check that ordering
1527c478bd9Sstevel@tonic-gate 		 * here.
1537c478bd9Sstevel@tonic-gate 		 */
1547c478bd9Sstevel@tonic-gate 		ASSERT(KDS_SEL == KCS_SEL + 8);
1557c478bd9Sstevel@tonic-gate 		ASSERT(UDS_SEL == U32CS_SEL + 8);
1567c478bd9Sstevel@tonic-gate 		ASSERT(UCS_SEL == U32CS_SEL + 16);
1577c478bd9Sstevel@tonic-gate #endif
1587c478bd9Sstevel@tonic-gate 		/*
1597c478bd9Sstevel@tonic-gate 		 * Turn syscall/sysret extensions on.
1607c478bd9Sstevel@tonic-gate 		 */
1617c478bd9Sstevel@tonic-gate 		cpu_asysc_enable();
1627c478bd9Sstevel@tonic-gate 
1637c478bd9Sstevel@tonic-gate 		/*
1647c478bd9Sstevel@tonic-gate 		 * Program the magic registers ..
1657c478bd9Sstevel@tonic-gate 		 */
1660ac7d7d8Skucharsk 		wrmsr(MSR_AMD_STAR, ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) <<
1670ac7d7d8Skucharsk 		    32);
1680ac7d7d8Skucharsk 		wrmsr(MSR_AMD_LSTAR, (uint64_t)(uintptr_t)sys_syscall);
1690ac7d7d8Skucharsk 		wrmsr(MSR_AMD_CSTAR, (uint64_t)(uintptr_t)sys_syscall32);
1707c478bd9Sstevel@tonic-gate 
1717c478bd9Sstevel@tonic-gate 		/*
1727c478bd9Sstevel@tonic-gate 		 * This list of flags is masked off the incoming
1737c478bd9Sstevel@tonic-gate 		 * %rfl when we enter the kernel.
1747c478bd9Sstevel@tonic-gate 		 */
1750ac7d7d8Skucharsk 		wrmsr(MSR_AMD_SFMASK, (uint64_t)(uintptr_t)(PS_IE | PS_T));
1767c478bd9Sstevel@tonic-gate 	}
1777c478bd9Sstevel@tonic-gate #endif
1787c478bd9Sstevel@tonic-gate 
1797c478bd9Sstevel@tonic-gate 	/*
1807c478bd9Sstevel@tonic-gate 	 * On 32-bit kernels, we use sysenter/sysexit because it's too
1817c478bd9Sstevel@tonic-gate 	 * hard to use syscall/sysret, and it is more portable anyway.
1827c478bd9Sstevel@tonic-gate 	 *
1837c478bd9Sstevel@tonic-gate 	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
1847c478bd9Sstevel@tonic-gate 	 * variant isn't available to 32-bit applications, but sysenter is.
1857c478bd9Sstevel@tonic-gate 	 */
1867c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_SEP) {
1877c478bd9Sstevel@tonic-gate 
1887c478bd9Sstevel@tonic-gate #if !defined(__lint)
1897c478bd9Sstevel@tonic-gate 		/*
1907c478bd9Sstevel@tonic-gate 		 * The sysenter instruction imposes a certain ordering on
1917c478bd9Sstevel@tonic-gate 		 * segment selectors, so we double-check that ordering
1927c478bd9Sstevel@tonic-gate 		 * here. See "sysenter" in Intel document 245471-012, "IA-32
1937c478bd9Sstevel@tonic-gate 		 * Intel Architecture Software Developer's Manual Volume 2:
1947c478bd9Sstevel@tonic-gate 		 * Instruction Set Reference"
1957c478bd9Sstevel@tonic-gate 		 */
1967c478bd9Sstevel@tonic-gate 		ASSERT(KDS_SEL == KCS_SEL + 8);
1977c478bd9Sstevel@tonic-gate 
1987c478bd9Sstevel@tonic-gate 		ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
1997c478bd9Sstevel@tonic-gate 		ASSERT32(UDS_SEL == UCS_SEL + 8);
2007c478bd9Sstevel@tonic-gate 
2017c478bd9Sstevel@tonic-gate 		ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
2027c478bd9Sstevel@tonic-gate 		ASSERT64(UDS_SEL == U32CS_SEL + 8);
2037c478bd9Sstevel@tonic-gate #endif
2047c478bd9Sstevel@tonic-gate 
2057c478bd9Sstevel@tonic-gate 		cpu_sep_enable();
2067c478bd9Sstevel@tonic-gate 
2077c478bd9Sstevel@tonic-gate 		/*
2087c478bd9Sstevel@tonic-gate 		 * resume() sets this value to the base of the threads stack
2097c478bd9Sstevel@tonic-gate 		 * via a context handler.
2107c478bd9Sstevel@tonic-gate 		 */
2110ac7d7d8Skucharsk 		wrmsr(MSR_INTC_SEP_ESP, 0ULL);
2120ac7d7d8Skucharsk 		wrmsr(MSR_INTC_SEP_EIP, (uint64_t)(uintptr_t)sys_sysenter);
2137c478bd9Sstevel@tonic-gate 	}
2147c478bd9Sstevel@tonic-gate 
2157c478bd9Sstevel@tonic-gate 	kpreempt_enable();
2167c478bd9Sstevel@tonic-gate }
2177c478bd9Sstevel@tonic-gate 
2187c478bd9Sstevel@tonic-gate /*
2197c478bd9Sstevel@tonic-gate  * Multiprocessor initialization.
2207c478bd9Sstevel@tonic-gate  *
2217c478bd9Sstevel@tonic-gate  * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
2227c478bd9Sstevel@tonic-gate  * startup and idle threads for the specified CPU.
2237c478bd9Sstevel@tonic-gate  */
2247c478bd9Sstevel@tonic-gate static void
2257c478bd9Sstevel@tonic-gate mp_startup_init(int cpun)
2267c478bd9Sstevel@tonic-gate {
2277c478bd9Sstevel@tonic-gate #if defined(__amd64)
2287c478bd9Sstevel@tonic-gate extern void *long_mode_64(void);
2297c478bd9Sstevel@tonic-gate #endif	/* __amd64 */
2307c478bd9Sstevel@tonic-gate 
2317c478bd9Sstevel@tonic-gate 	struct cpu *cp;
2327c478bd9Sstevel@tonic-gate 	struct tss *ntss;
2337c478bd9Sstevel@tonic-gate 	kthread_id_t tp;
2347c478bd9Sstevel@tonic-gate 	caddr_t	sp;
2357c478bd9Sstevel@tonic-gate 	int size;
2367c478bd9Sstevel@tonic-gate 	proc_t *procp;
2377c478bd9Sstevel@tonic-gate 	extern void idle();
2387c478bd9Sstevel@tonic-gate 
2397c478bd9Sstevel@tonic-gate 	struct cpu_tables *tablesp;
2407c478bd9Sstevel@tonic-gate 	rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va;
2417c478bd9Sstevel@tonic-gate 
2427c478bd9Sstevel@tonic-gate #ifdef TRAPTRACE
2437c478bd9Sstevel@tonic-gate 	trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
2447c478bd9Sstevel@tonic-gate #endif
2457c478bd9Sstevel@tonic-gate 
2467c478bd9Sstevel@tonic-gate 	ASSERT(cpun < NCPU && cpu[cpun] == NULL);
2477c478bd9Sstevel@tonic-gate 
2487c478bd9Sstevel@tonic-gate 	if ((cp = kmem_zalloc(sizeof (*cp), KM_NOSLEEP)) == NULL) {
2497c478bd9Sstevel@tonic-gate 		panic("mp_startup_init: cpu%d: "
2507c478bd9Sstevel@tonic-gate 		    "no memory for cpu structure", cpun);
2517c478bd9Sstevel@tonic-gate 		/*NOTREACHED*/
2527c478bd9Sstevel@tonic-gate 	}
2537c478bd9Sstevel@tonic-gate 	procp = curthread->t_procp;
2547c478bd9Sstevel@tonic-gate 
2557c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
2567c478bd9Sstevel@tonic-gate 	/*
2577c478bd9Sstevel@tonic-gate 	 * Initialize the dispatcher first.
2587c478bd9Sstevel@tonic-gate 	 */
2597c478bd9Sstevel@tonic-gate 	disp_cpu_init(cp);
2607c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
2617c478bd9Sstevel@tonic-gate 
262affbd3ccSkchow 	cpu_vm_data_init(cp);
263affbd3ccSkchow 
2647c478bd9Sstevel@tonic-gate 	/*
2657c478bd9Sstevel@tonic-gate 	 * Allocate and initialize the startup thread for this CPU.
2667c478bd9Sstevel@tonic-gate 	 * Interrupt and process switch stacks get allocated later
2677c478bd9Sstevel@tonic-gate 	 * when the CPU starts running.
2687c478bd9Sstevel@tonic-gate 	 */
2697c478bd9Sstevel@tonic-gate 	tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
2707c478bd9Sstevel@tonic-gate 	    TS_STOPPED, maxclsyspri);
2717c478bd9Sstevel@tonic-gate 
2727c478bd9Sstevel@tonic-gate 	/*
2737c478bd9Sstevel@tonic-gate 	 * Set state to TS_ONPROC since this thread will start running
2747c478bd9Sstevel@tonic-gate 	 * as soon as the CPU comes online.
2757c478bd9Sstevel@tonic-gate 	 *
2767c478bd9Sstevel@tonic-gate 	 * All the other fields of the thread structure are setup by
2777c478bd9Sstevel@tonic-gate 	 * thread_create().
2787c478bd9Sstevel@tonic-gate 	 */
2797c478bd9Sstevel@tonic-gate 	THREAD_ONPROC(tp, cp);
2807c478bd9Sstevel@tonic-gate 	tp->t_preempt = 1;
2817c478bd9Sstevel@tonic-gate 	tp->t_bound_cpu = cp;
2827c478bd9Sstevel@tonic-gate 	tp->t_affinitycnt = 1;
2837c478bd9Sstevel@tonic-gate 	tp->t_cpu = cp;
2847c478bd9Sstevel@tonic-gate 	tp->t_disp_queue = cp->cpu_disp;
2857c478bd9Sstevel@tonic-gate 
2867c478bd9Sstevel@tonic-gate 	/*
2877c478bd9Sstevel@tonic-gate 	 * Setup thread to start in mp_startup.
2887c478bd9Sstevel@tonic-gate 	 */
2897c478bd9Sstevel@tonic-gate 	sp = tp->t_stk;
2907c478bd9Sstevel@tonic-gate 	tp->t_pc = (uintptr_t)mp_startup;
2917c478bd9Sstevel@tonic-gate 	tp->t_sp = (uintptr_t)(sp - MINFRAME);
2927c478bd9Sstevel@tonic-gate 
2937c478bd9Sstevel@tonic-gate 	cp->cpu_id = cpun;
2947c478bd9Sstevel@tonic-gate 	cp->cpu_self = cp;
2957c478bd9Sstevel@tonic-gate 	cp->cpu_thread = tp;
2967c478bd9Sstevel@tonic-gate 	cp->cpu_lwp = NULL;
2977c478bd9Sstevel@tonic-gate 	cp->cpu_dispthread = tp;
2987c478bd9Sstevel@tonic-gate 	cp->cpu_dispatch_pri = DISP_PRIO(tp);
2997c478bd9Sstevel@tonic-gate 
300da43ceabSsethg 	/*
301da43ceabSsethg 	 * cpu_base_spl must be set explicitly here to prevent any blocking
302da43ceabSsethg 	 * operations in mp_startup from causing the spl of the cpu to drop
303da43ceabSsethg 	 * to 0 (allowing device interrupts before we're ready) in resume().
304da43ceabSsethg 	 * cpu_base_spl MUST remain at LOCK_LEVEL until the cpu is CPU_READY.
305da43ceabSsethg 	 * As an extra bit of security on DEBUG kernels, this is enforced with
306da43ceabSsethg 	 * an assertion in mp_startup() -- before cpu_base_spl is set to its
307da43ceabSsethg 	 * proper value.
308da43ceabSsethg 	 */
309da43ceabSsethg 	cp->cpu_base_spl = ipltospl(LOCK_LEVEL);
310da43ceabSsethg 
3117c478bd9Sstevel@tonic-gate 	/*
3127c478bd9Sstevel@tonic-gate 	 * Now, initialize per-CPU idle thread for this CPU.
3137c478bd9Sstevel@tonic-gate 	 */
3147c478bd9Sstevel@tonic-gate 	tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);
3157c478bd9Sstevel@tonic-gate 
3167c478bd9Sstevel@tonic-gate 	cp->cpu_idle_thread = tp;
3177c478bd9Sstevel@tonic-gate 
3187c478bd9Sstevel@tonic-gate 	tp->t_preempt = 1;
3197c478bd9Sstevel@tonic-gate 	tp->t_bound_cpu = cp;
3207c478bd9Sstevel@tonic-gate 	tp->t_affinitycnt = 1;
3217c478bd9Sstevel@tonic-gate 	tp->t_cpu = cp;
3227c478bd9Sstevel@tonic-gate 	tp->t_disp_queue = cp->cpu_disp;
3237c478bd9Sstevel@tonic-gate 
324394b433dSesaxe 	/*
325*fb2f18f8Sesaxe 	 * Bootstrap the CPU's PG data
326394b433dSesaxe 	 */
327*fb2f18f8Sesaxe 	pg_cpu_bootstrap(cp);
328394b433dSesaxe 
3297c478bd9Sstevel@tonic-gate 	/*
3307c478bd9Sstevel@tonic-gate 	 * Perform CPC intialization on the new CPU.
3317c478bd9Sstevel@tonic-gate 	 */
3327c478bd9Sstevel@tonic-gate 	kcpc_hw_init(cp);
3337c478bd9Sstevel@tonic-gate 
3347c478bd9Sstevel@tonic-gate 	/*
3357c478bd9Sstevel@tonic-gate 	 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
3367c478bd9Sstevel@tonic-gate 	 * for each CPU.
3377c478bd9Sstevel@tonic-gate 	 */
3387c478bd9Sstevel@tonic-gate 
3397c478bd9Sstevel@tonic-gate 	setup_vaddr_for_ppcopy(cp);
3407c478bd9Sstevel@tonic-gate 
3417c478bd9Sstevel@tonic-gate 	/*
3427c478bd9Sstevel@tonic-gate 	 * Allocate space for page directory, stack, tss, gdt and idt.
3437c478bd9Sstevel@tonic-gate 	 * This assumes that kmem_alloc will return memory which is aligned
3447c478bd9Sstevel@tonic-gate 	 * to the next higher power of 2 or a page(if size > MAXABIG)
3457c478bd9Sstevel@tonic-gate 	 * If this assumption goes wrong at any time due to change in
3467c478bd9Sstevel@tonic-gate 	 * kmem alloc, things may not work as the page directory has to be
3477c478bd9Sstevel@tonic-gate 	 * page aligned
3487c478bd9Sstevel@tonic-gate 	 */
3497c478bd9Sstevel@tonic-gate 	if ((tablesp = kmem_zalloc(sizeof (*tablesp), KM_NOSLEEP)) == NULL)
3507c478bd9Sstevel@tonic-gate 		panic("mp_startup_init: cpu%d cannot allocate tables", cpun);
3517c478bd9Sstevel@tonic-gate 
3527c478bd9Sstevel@tonic-gate 	if ((uintptr_t)tablesp & ~MMU_STD_PAGEMASK) {
3537c478bd9Sstevel@tonic-gate 		kmem_free(tablesp, sizeof (struct cpu_tables));
3547c478bd9Sstevel@tonic-gate 		size = sizeof (struct cpu_tables) + MMU_STD_PAGESIZE;
3557c478bd9Sstevel@tonic-gate 		tablesp = kmem_zalloc(size, KM_NOSLEEP);
3567c478bd9Sstevel@tonic-gate 		tablesp = (struct cpu_tables *)
3577c478bd9Sstevel@tonic-gate 		    (((uintptr_t)tablesp + MMU_STD_PAGESIZE) &
3587c478bd9Sstevel@tonic-gate 		    MMU_STD_PAGEMASK);
3597c478bd9Sstevel@tonic-gate 	}
3607c478bd9Sstevel@tonic-gate 
3617c478bd9Sstevel@tonic-gate 	ntss = cp->cpu_tss = &tablesp->ct_tss;
3625f9a4ecdSrab 
3635f9a4ecdSrab 	if ((tablesp->ct_gdt = kmem_zalloc(PAGESIZE, KM_NOSLEEP)) == NULL)
3645f9a4ecdSrab 		panic("mp_startup_init: cpu%d cannot allocate GDT", cpun);
3657c478bd9Sstevel@tonic-gate 	cp->cpu_gdt = tablesp->ct_gdt;
3667c478bd9Sstevel@tonic-gate 	bcopy(CPU->cpu_gdt, cp->cpu_gdt, NGDT * (sizeof (user_desc_t)));
3677c478bd9Sstevel@tonic-gate 
3687c478bd9Sstevel@tonic-gate #if defined(__amd64)
3697c478bd9Sstevel@tonic-gate 
3707c478bd9Sstevel@tonic-gate 	/*
3717c478bd9Sstevel@tonic-gate 	 * #DF (double fault).
3727c478bd9Sstevel@tonic-gate 	 */
3737c478bd9Sstevel@tonic-gate 	ntss->tss_ist1 =
3747c478bd9Sstevel@tonic-gate 	    (uint64_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)];
3757c478bd9Sstevel@tonic-gate 
3767c478bd9Sstevel@tonic-gate #elif defined(__i386)
3777c478bd9Sstevel@tonic-gate 
3787c478bd9Sstevel@tonic-gate 	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
3797c478bd9Sstevel@tonic-gate 	    (uint32_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)];
3807c478bd9Sstevel@tonic-gate 
3817c478bd9Sstevel@tonic-gate 	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;
3827c478bd9Sstevel@tonic-gate 
3837c478bd9Sstevel@tonic-gate 	ntss->tss_eip = (uint32_t)mp_startup;
3847c478bd9Sstevel@tonic-gate 
3857c478bd9Sstevel@tonic-gate 	ntss->tss_cs = KCS_SEL;
3867c478bd9Sstevel@tonic-gate 	ntss->tss_fs = KFS_SEL;
3877c478bd9Sstevel@tonic-gate 	ntss->tss_gs = KGS_SEL;
3887c478bd9Sstevel@tonic-gate 
3897c478bd9Sstevel@tonic-gate 	/*
3907c478bd9Sstevel@tonic-gate 	 * setup kernel %gs.
3917c478bd9Sstevel@tonic-gate 	 */
3927c478bd9Sstevel@tonic-gate 	set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
3937c478bd9Sstevel@tonic-gate 	    SEL_KPL, 0, 1);
3947c478bd9Sstevel@tonic-gate 
3957c478bd9Sstevel@tonic-gate #endif	/* __i386 */
3967c478bd9Sstevel@tonic-gate 
3977c478bd9Sstevel@tonic-gate 	/*
3987c478bd9Sstevel@tonic-gate 	 * Set I/O bit map offset equal to size of TSS segment limit
3997c478bd9Sstevel@tonic-gate 	 * for no I/O permission map. This will cause all user I/O
4007c478bd9Sstevel@tonic-gate 	 * instructions to generate #gp fault.
4017c478bd9Sstevel@tonic-gate 	 */
4027c478bd9Sstevel@tonic-gate 	ntss->tss_bitmapbase = sizeof (*ntss);
4037c478bd9Sstevel@tonic-gate 
4047c478bd9Sstevel@tonic-gate 	/*
4057c478bd9Sstevel@tonic-gate 	 * setup kernel tss.
4067c478bd9Sstevel@tonic-gate 	 */
4077c478bd9Sstevel@tonic-gate 	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
4087c478bd9Sstevel@tonic-gate 	    sizeof (*cp->cpu_tss) -1, SDT_SYSTSS, SEL_KPL);
4097c478bd9Sstevel@tonic-gate 
4107c478bd9Sstevel@tonic-gate 	/*
4117c478bd9Sstevel@tonic-gate 	 * If we have more than one node, each cpu gets a copy of IDT
4127c478bd9Sstevel@tonic-gate 	 * local to its node. If this is a Pentium box, we use cpu 0's
4137c478bd9Sstevel@tonic-gate 	 * IDT. cpu 0's IDT has been made read-only to workaround the
4147c478bd9Sstevel@tonic-gate 	 * cmpxchgl register bug
4157c478bd9Sstevel@tonic-gate 	 */
4167c478bd9Sstevel@tonic-gate 	cp->cpu_idt = CPU->cpu_idt;
4177c478bd9Sstevel@tonic-gate 	if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) {
4187c478bd9Sstevel@tonic-gate 		cp->cpu_idt = kmem_alloc(sizeof (idt0), KM_SLEEP);
4197c478bd9Sstevel@tonic-gate 		bcopy(idt0, cp->cpu_idt, sizeof (idt0));
4207c478bd9Sstevel@tonic-gate 	}
4217c478bd9Sstevel@tonic-gate 
4227c478bd9Sstevel@tonic-gate 	/*
4237c478bd9Sstevel@tonic-gate 	 * Get interrupt priority data from cpu 0
4247c478bd9Sstevel@tonic-gate 	 */
4257c478bd9Sstevel@tonic-gate 	cp->cpu_pri_data = CPU->cpu_pri_data;
4267c478bd9Sstevel@tonic-gate 
4277c478bd9Sstevel@tonic-gate 	hat_cpu_online(cp);
4287c478bd9Sstevel@tonic-gate 
4297c478bd9Sstevel@tonic-gate 	/* Should remove all entries for the current process/thread here */
4307c478bd9Sstevel@tonic-gate 
4317c478bd9Sstevel@tonic-gate 	/*
4327c478bd9Sstevel@tonic-gate 	 * Fill up the real mode platter to make it easy for real mode code to
4337c478bd9Sstevel@tonic-gate 	 * kick it off. This area should really be one passed by boot to kernel
4347c478bd9Sstevel@tonic-gate 	 * and guaranteed to be below 1MB and aligned to 16 bytes. Should also
4357c478bd9Sstevel@tonic-gate 	 * have identical physical and virtual address in paged mode.
4367c478bd9Sstevel@tonic-gate 	 */
4377c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_idt_base = cp->cpu_idt;
4387c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_idt_lim = sizeof (idt0) - 1;
4397c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_gdt_base = cp->cpu_gdt;
4407c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_gdt_lim = sizeof (gdt0) -1;
4417c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_pdbr = getcr3();
4427c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_cpu = cpun;
4437c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_x86feature = x86_feature;
4447c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_cr4 = cr4_value;
4457c478bd9Sstevel@tonic-gate 
4467c478bd9Sstevel@tonic-gate #if defined(__amd64)
4477c478bd9Sstevel@tonic-gate 	if (getcr3() > 0xffffffffUL)
4487c478bd9Sstevel@tonic-gate 		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
4497c478bd9Sstevel@tonic-gate 			"located above 4G in physical memory (@ 0x%llx).",
4507c478bd9Sstevel@tonic-gate 			(unsigned long long)getcr3());
4517c478bd9Sstevel@tonic-gate 
4527c478bd9Sstevel@tonic-gate 	/*
4537c478bd9Sstevel@tonic-gate 	 * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY
4547c478bd9Sstevel@tonic-gate 	 * by code in real_mode_start():
4557c478bd9Sstevel@tonic-gate 	 *
4567c478bd9Sstevel@tonic-gate 	 * GDT[0]:  NULL selector
4577c478bd9Sstevel@tonic-gate 	 * GDT[1]:  64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
4587c478bd9Sstevel@tonic-gate 	 *
4597c478bd9Sstevel@tonic-gate 	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
4607c478bd9Sstevel@tonic-gate 	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
4617c478bd9Sstevel@tonic-gate 	 * a course of action as any other, though it may cause the entire
4627c478bd9Sstevel@tonic-gate 	 * platform to reset in some cases...
4637c478bd9Sstevel@tonic-gate 	 */
4647c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt[0] = 0ULL;
4657c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;
4667c478bd9Sstevel@tonic-gate 
4677c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt_lim = (ushort_t)
4687c478bd9Sstevel@tonic-gate 	    (sizeof (real_mode_platter->rm_temp_gdt) - 1);
4697c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt_base = rm_platter_pa +
4707c478bd9Sstevel@tonic-gate 	    (uint32_t)(&((rm_platter_t *)0)->rm_temp_gdt);
4717c478bd9Sstevel@tonic-gate 
4727c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_idt_lim = 0;
4737c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_idt_base = 0;
4747c478bd9Sstevel@tonic-gate 
4757c478bd9Sstevel@tonic-gate 	/*
4767c478bd9Sstevel@tonic-gate 	 * Since the CPU needs to jump to protected mode using an identity
4777c478bd9Sstevel@tonic-gate 	 * mapped address, we need to calculate it here.
4787c478bd9Sstevel@tonic-gate 	 */
4797c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_longmode64_addr = rm_platter_pa +
4807c478bd9Sstevel@tonic-gate 	    ((uint32_t)long_mode_64 - (uint32_t)real_mode_start);
4817c478bd9Sstevel@tonic-gate #endif	/* __amd64 */
4827c478bd9Sstevel@tonic-gate 
4837c478bd9Sstevel@tonic-gate #ifdef TRAPTRACE
4847c478bd9Sstevel@tonic-gate 	/*
4857c478bd9Sstevel@tonic-gate 	 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers for this
4867c478bd9Sstevel@tonic-gate 	 * CPU.
4877c478bd9Sstevel@tonic-gate 	 */
4887c478bd9Sstevel@tonic-gate 	ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
4897c478bd9Sstevel@tonic-gate 	ttc->ttc_next = ttc->ttc_first;
4907c478bd9Sstevel@tonic-gate 	ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
4917c478bd9Sstevel@tonic-gate #endif
4927c478bd9Sstevel@tonic-gate 
4937c478bd9Sstevel@tonic-gate 	/*
4947c478bd9Sstevel@tonic-gate 	 * Record that we have another CPU.
4957c478bd9Sstevel@tonic-gate 	 */
4967c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
4977c478bd9Sstevel@tonic-gate 	/*
4987c478bd9Sstevel@tonic-gate 	 * Initialize the interrupt threads for this CPU
4997c478bd9Sstevel@tonic-gate 	 */
500100b72f4Sandrei 	cpu_intr_alloc(cp, NINTR_THREADS);
5017c478bd9Sstevel@tonic-gate 	/*
5027c478bd9Sstevel@tonic-gate 	 * Add CPU to list of available CPUs.  It'll be on the active list
5037c478bd9Sstevel@tonic-gate 	 * after mp_startup().
5047c478bd9Sstevel@tonic-gate 	 */
5057c478bd9Sstevel@tonic-gate 	cpu_add_unit(cp);
5067c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
5077c478bd9Sstevel@tonic-gate }
5087c478bd9Sstevel@tonic-gate 
5097c478bd9Sstevel@tonic-gate /*
5107c478bd9Sstevel@tonic-gate  * Apply workarounds for known errata, and warn about those that are absent.
5117c478bd9Sstevel@tonic-gate  *
5127c478bd9Sstevel@tonic-gate  * System vendors occasionally create configurations which contain different
5137c478bd9Sstevel@tonic-gate  * revisions of the CPUs that are almost but not exactly the same.  At the
5147c478bd9Sstevel@tonic-gate  * time of writing, this meant that their clock rates were the same, their
5157c478bd9Sstevel@tonic-gate  * feature sets were the same, but the required workaround were -not-
5167c478bd9Sstevel@tonic-gate  * necessarily the same.  So, this routine is invoked on -every- CPU soon
5177c478bd9Sstevel@tonic-gate  * after starting to make sure that the resulting system contains the most
5187c478bd9Sstevel@tonic-gate  * pessimal set of workarounds needed to cope with *any* of the CPUs in the
5197c478bd9Sstevel@tonic-gate  * system.
5207c478bd9Sstevel@tonic-gate  *
521ef50d8c0Sesaxe  * workaround_errata is invoked early in mlsetup() for CPU 0, and in
522ef50d8c0Sesaxe  * mp_startup() for all slave CPUs. Slaves process workaround_errata prior
523ef50d8c0Sesaxe  * to acknowledging their readiness to the master, so this routine will
524ef50d8c0Sesaxe  * never be executed by multiple CPUs in parallel, thus making updates to
525ef50d8c0Sesaxe  * global data safe.
526ef50d8c0Sesaxe  *
5272201b277Skucharsk  * These workarounds are based on Rev 3.57 of the Revision Guide for
5282201b277Skucharsk  * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, August 2005.
5297c478bd9Sstevel@tonic-gate  */
5307c478bd9Sstevel@tonic-gate 
5317c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91)
5327c478bd9Sstevel@tonic-gate int opteron_erratum_91;		/* if non-zero -> at least one cpu has it */
5337c478bd9Sstevel@tonic-gate #endif
5347c478bd9Sstevel@tonic-gate 
5357c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93)
5367c478bd9Sstevel@tonic-gate int opteron_erratum_93;		/* if non-zero -> at least one cpu has it */
5377c478bd9Sstevel@tonic-gate #endif
5387c478bd9Sstevel@tonic-gate 
5397c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_100)
5407c478bd9Sstevel@tonic-gate int opteron_erratum_100;	/* if non-zero -> at least one cpu has it */
5417c478bd9Sstevel@tonic-gate #endif
5427c478bd9Sstevel@tonic-gate 
5437c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109)
5447c478bd9Sstevel@tonic-gate int opteron_erratum_109;	/* if non-zero -> at least one cpu has it */
5457c478bd9Sstevel@tonic-gate #endif
5467c478bd9Sstevel@tonic-gate 
5477c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121)
5487c478bd9Sstevel@tonic-gate int opteron_erratum_121;	/* if non-zero -> at least one cpu has it */
5497c478bd9Sstevel@tonic-gate #endif
5507c478bd9Sstevel@tonic-gate 
5517c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_122)
5527c478bd9Sstevel@tonic-gate int opteron_erratum_122;	/* if non-zero -> at least one cpu has it */
5537c478bd9Sstevel@tonic-gate #endif
5547c478bd9Sstevel@tonic-gate 
5557c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_123)
5567c478bd9Sstevel@tonic-gate int opteron_erratum_123;	/* if non-zero -> at least one cpu has it */
5577c478bd9Sstevel@tonic-gate #endif
5587c478bd9Sstevel@tonic-gate 
5592201b277Skucharsk #if defined(OPTERON_ERRATUM_131)
5602201b277Skucharsk int opteron_erratum_131;	/* if non-zero -> at least one cpu has it */
5612201b277Skucharsk #endif
5627c478bd9Sstevel@tonic-gate 
563ef50d8c0Sesaxe #if defined(OPTERON_WORKAROUND_6336786)
564ef50d8c0Sesaxe int opteron_workaround_6336786;	/* non-zero -> WA relevant and applied */
565ef50d8c0Sesaxe int opteron_workaround_6336786_UP = 0;	/* Not needed for UP */
566ef50d8c0Sesaxe #endif
567ef50d8c0Sesaxe 
568ee88d2b9Skchow #if defined(OPTERON_WORKAROUND_6323525)
569ee88d2b9Skchow int opteron_workaround_6323525;	/* if non-zero -> at least one cpu has it */
570ee88d2b9Skchow #endif
571ee88d2b9Skchow 
5727c478bd9Sstevel@tonic-gate #define	WARNING(cpu, n)						\
5737c478bd9Sstevel@tonic-gate 	cmn_err(CE_WARN, "cpu%d: no workaround for erratum %d",	\
5747c478bd9Sstevel@tonic-gate 	    (cpu)->cpu_id, (n))
5757c478bd9Sstevel@tonic-gate 
5767c478bd9Sstevel@tonic-gate uint_t
5777c478bd9Sstevel@tonic-gate workaround_errata(struct cpu *cpu)
5787c478bd9Sstevel@tonic-gate {
5797c478bd9Sstevel@tonic-gate 	uint_t missing = 0;
5807c478bd9Sstevel@tonic-gate 
5817c478bd9Sstevel@tonic-gate 	ASSERT(cpu == CPU);
5827c478bd9Sstevel@tonic-gate 
5837c478bd9Sstevel@tonic-gate 	/*LINTED*/
5847c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 88) > 0) {
5857c478bd9Sstevel@tonic-gate 		/*
5867c478bd9Sstevel@tonic-gate 		 * SWAPGS May Fail To Read Correct GS Base
5877c478bd9Sstevel@tonic-gate 		 */
5887c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_88)
5897c478bd9Sstevel@tonic-gate 		/*
5907c478bd9Sstevel@tonic-gate 		 * The workaround is an mfence in the relevant assembler code
5917c478bd9Sstevel@tonic-gate 		 */
5927c478bd9Sstevel@tonic-gate #else
5937c478bd9Sstevel@tonic-gate 		WARNING(cpu, 88);
5947c478bd9Sstevel@tonic-gate 		missing++;
5957c478bd9Sstevel@tonic-gate #endif
5967c478bd9Sstevel@tonic-gate 	}
5977c478bd9Sstevel@tonic-gate 
5987c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 91) > 0) {
5997c478bd9Sstevel@tonic-gate 		/*
6007c478bd9Sstevel@tonic-gate 		 * Software Prefetches May Report A Page Fault
6017c478bd9Sstevel@tonic-gate 		 */
6027c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91)
6037c478bd9Sstevel@tonic-gate 		/*
6047c478bd9Sstevel@tonic-gate 		 * fix is in trap.c
6057c478bd9Sstevel@tonic-gate 		 */
6067c478bd9Sstevel@tonic-gate 		opteron_erratum_91++;
6077c478bd9Sstevel@tonic-gate #else
6087c478bd9Sstevel@tonic-gate 		WARNING(cpu, 91);
6097c478bd9Sstevel@tonic-gate 		missing++;
6107c478bd9Sstevel@tonic-gate #endif
6117c478bd9Sstevel@tonic-gate 	}
6127c478bd9Sstevel@tonic-gate 
6137c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 93) > 0) {
6147c478bd9Sstevel@tonic-gate 		/*
6157c478bd9Sstevel@tonic-gate 		 * RSM Auto-Halt Restart Returns to Incorrect RIP
6167c478bd9Sstevel@tonic-gate 		 */
6177c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93)
6187c478bd9Sstevel@tonic-gate 		/*
6197c478bd9Sstevel@tonic-gate 		 * fix is in trap.c
6207c478bd9Sstevel@tonic-gate 		 */
6217c478bd9Sstevel@tonic-gate 		opteron_erratum_93++;
6227c478bd9Sstevel@tonic-gate #else
6237c478bd9Sstevel@tonic-gate 		WARNING(cpu, 93);
6247c478bd9Sstevel@tonic-gate 		missing++;
6257c478bd9Sstevel@tonic-gate #endif
6267c478bd9Sstevel@tonic-gate 	}
6277c478bd9Sstevel@tonic-gate 
6287c478bd9Sstevel@tonic-gate 	/*LINTED*/
6297c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 95) > 0) {
6307c478bd9Sstevel@tonic-gate 		/*
6317c478bd9Sstevel@tonic-gate 		 * RET Instruction May Return to Incorrect EIP
6327c478bd9Sstevel@tonic-gate 		 */
6337c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_95)
6347c478bd9Sstevel@tonic-gate #if defined(_LP64)
6357c478bd9Sstevel@tonic-gate 		/*
6367c478bd9Sstevel@tonic-gate 		 * Workaround this by ensuring that 32-bit user code and
6377c478bd9Sstevel@tonic-gate 		 * 64-bit kernel code never occupy the same address
6387c478bd9Sstevel@tonic-gate 		 * range mod 4G.
6397c478bd9Sstevel@tonic-gate 		 */
6407c478bd9Sstevel@tonic-gate 		if (_userlimit32 > 0xc0000000ul)
6417c478bd9Sstevel@tonic-gate 			*(uintptr_t *)&_userlimit32 = 0xc0000000ul;
6427c478bd9Sstevel@tonic-gate 
6437c478bd9Sstevel@tonic-gate 		/*LINTED*/
6447c478bd9Sstevel@tonic-gate 		ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
6457c478bd9Sstevel@tonic-gate #endif	/* _LP64 */
6467c478bd9Sstevel@tonic-gate #else
6477c478bd9Sstevel@tonic-gate 		WARNING(cpu, 95);
6487c478bd9Sstevel@tonic-gate 		missing++;
6497c478bd9Sstevel@tonic-gate #endif	/* OPTERON_ERRATUM_95 */
6507c478bd9Sstevel@tonic-gate 	}
6517c478bd9Sstevel@tonic-gate 
6527c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 100) > 0) {
6537c478bd9Sstevel@tonic-gate 		/*
6547c478bd9Sstevel@tonic-gate 		 * Compatibility Mode Branches Transfer to Illegal Address
6557c478bd9Sstevel@tonic-gate 		 */
6567c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_100)
6577c478bd9Sstevel@tonic-gate 		/*
6587c478bd9Sstevel@tonic-gate 		 * fix is in trap.c
6597c478bd9Sstevel@tonic-gate 		 */
6607c478bd9Sstevel@tonic-gate 		opteron_erratum_100++;
6617c478bd9Sstevel@tonic-gate #else
6627c478bd9Sstevel@tonic-gate 		WARNING(cpu, 100);
6637c478bd9Sstevel@tonic-gate 		missing++;
6647c478bd9Sstevel@tonic-gate #endif
6657c478bd9Sstevel@tonic-gate 	}
6667c478bd9Sstevel@tonic-gate 
6677c478bd9Sstevel@tonic-gate 	/*LINTED*/
6687c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 108) > 0) {
6697c478bd9Sstevel@tonic-gate 		/*
6707c478bd9Sstevel@tonic-gate 		 * CPUID Instruction May Return Incorrect Model Number In
6717c478bd9Sstevel@tonic-gate 		 * Some Processors
6727c478bd9Sstevel@tonic-gate 		 */
6737c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_108)
6747c478bd9Sstevel@tonic-gate 		/*
6757c478bd9Sstevel@tonic-gate 		 * (Our cpuid-handling code corrects the model number on
6767c478bd9Sstevel@tonic-gate 		 * those processors)
6777c478bd9Sstevel@tonic-gate 		 */
6787c478bd9Sstevel@tonic-gate #else
6797c478bd9Sstevel@tonic-gate 		WARNING(cpu, 108);
6807c478bd9Sstevel@tonic-gate 		missing++;
6817c478bd9Sstevel@tonic-gate #endif
6827c478bd9Sstevel@tonic-gate 	}
6837c478bd9Sstevel@tonic-gate 
6847c478bd9Sstevel@tonic-gate 	/*LINTED*/
6857c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 109) > 0) {
6867c478bd9Sstevel@tonic-gate 		/*
6877c478bd9Sstevel@tonic-gate 		 * Certain Reverse REP MOVS May Produce Unpredictable Behaviour
6887c478bd9Sstevel@tonic-gate 		 */
6897c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109)
6907c478bd9Sstevel@tonic-gate 
6917c478bd9Sstevel@tonic-gate 		/* workaround is to print a warning to upgrade BIOS */
6920ac7d7d8Skucharsk 		if (rdmsr(MSR_AMD_PATCHLEVEL) == 0)
6937c478bd9Sstevel@tonic-gate 			opteron_erratum_109++;
6947c478bd9Sstevel@tonic-gate #else
6957c478bd9Sstevel@tonic-gate 		WARNING(cpu, 109);
6967c478bd9Sstevel@tonic-gate 		missing++;
6977c478bd9Sstevel@tonic-gate #endif
6987c478bd9Sstevel@tonic-gate 	}
6997c478bd9Sstevel@tonic-gate 	/*LINTED*/
7007c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 121) > 0) {
7017c478bd9Sstevel@tonic-gate 		/*
7027c478bd9Sstevel@tonic-gate 		 * Sequential Execution Across Non_Canonical Boundary Caused
7037c478bd9Sstevel@tonic-gate 		 * Processor Hang
7047c478bd9Sstevel@tonic-gate 		 */
7057c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121)
7067c478bd9Sstevel@tonic-gate 		static int	lma;
7077c478bd9Sstevel@tonic-gate 
7087c478bd9Sstevel@tonic-gate 		if (opteron_erratum_121)
7097c478bd9Sstevel@tonic-gate 			opteron_erratum_121++;
7107c478bd9Sstevel@tonic-gate 
7117c478bd9Sstevel@tonic-gate 		/*
7127c478bd9Sstevel@tonic-gate 		 * Erratum 121 is only present in long (64 bit) mode.
7137c478bd9Sstevel@tonic-gate 		 * Workaround is to include the page immediately before the
7147c478bd9Sstevel@tonic-gate 		 * va hole to eliminate the possibility of system hangs due to
7157c478bd9Sstevel@tonic-gate 		 * sequential execution across the va hole boundary.
7167c478bd9Sstevel@tonic-gate 		 */
7177c478bd9Sstevel@tonic-gate 		if (lma == 0) {
7187c478bd9Sstevel@tonic-gate 			/*
7197c478bd9Sstevel@tonic-gate 			 * check LMA once: assume all cpus are in long mode
7207c478bd9Sstevel@tonic-gate 			 * or not.
7217c478bd9Sstevel@tonic-gate 			 */
7227c478bd9Sstevel@tonic-gate 			lma = 1;
7237c478bd9Sstevel@tonic-gate 
7240ac7d7d8Skucharsk 			if (rdmsr(MSR_AMD_EFER) & AMD_EFER_LMA) {
7257c478bd9Sstevel@tonic-gate 				if (hole_start) {
7267c478bd9Sstevel@tonic-gate 					hole_start -= PAGESIZE;
7277c478bd9Sstevel@tonic-gate 				} else {
7287c478bd9Sstevel@tonic-gate 					/*
7297c478bd9Sstevel@tonic-gate 					 * hole_start not yet initialized by
7307c478bd9Sstevel@tonic-gate 					 * mmu_init. Initialize hole_start
7317c478bd9Sstevel@tonic-gate 					 * with value to be subtracted.
7327c478bd9Sstevel@tonic-gate 					 */
7337c478bd9Sstevel@tonic-gate 					hole_start = PAGESIZE;
7347c478bd9Sstevel@tonic-gate 				}
7357c478bd9Sstevel@tonic-gate 				opteron_erratum_121++;
7367c478bd9Sstevel@tonic-gate 			}
7377c478bd9Sstevel@tonic-gate 		}
7387c478bd9Sstevel@tonic-gate #else
7397c478bd9Sstevel@tonic-gate 		WARNING(cpu, 121);
7407c478bd9Sstevel@tonic-gate 		missing++;
7417c478bd9Sstevel@tonic-gate #endif
7427c478bd9Sstevel@tonic-gate 	}
7437c478bd9Sstevel@tonic-gate 
7447c478bd9Sstevel@tonic-gate 	/*LINTED*/
7457c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 122) > 0) {
7467c478bd9Sstevel@tonic-gate 		/*
7477c478bd9Sstevel@tonic-gate 		 * TLB Flush Filter May Cause Cohenrency Problem in
7487c478bd9Sstevel@tonic-gate 		 * Multiprocessor Systems
7497c478bd9Sstevel@tonic-gate 		 */
7507c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_122)
7517c478bd9Sstevel@tonic-gate 		/*
7527c478bd9Sstevel@tonic-gate 		 * Erratum 122 is only present in MP configurations (multi-core
7537c478bd9Sstevel@tonic-gate 		 * or multi-processor).
7547c478bd9Sstevel@tonic-gate 		 */
7557c478bd9Sstevel@tonic-gate 
7567c478bd9Sstevel@tonic-gate 		if (opteron_erratum_122 || lgrp_plat_node_cnt > 1 ||
7577c478bd9Sstevel@tonic-gate 		    cpuid_get_ncpu_per_chip(cpu) > 1) {
7587c478bd9Sstevel@tonic-gate 			/* disable TLB Flush Filter */
7590ac7d7d8Skucharsk 			wrmsr(MSR_AMD_HWCR, rdmsr(MSR_AMD_HWCR) |
7600ac7d7d8Skucharsk 			    (uint64_t)(uintptr_t)AMD_HWCR_FFDIS);
7617c478bd9Sstevel@tonic-gate 			opteron_erratum_122++;
7627c478bd9Sstevel@tonic-gate 		}
7637c478bd9Sstevel@tonic-gate 
7647c478bd9Sstevel@tonic-gate #else
7657c478bd9Sstevel@tonic-gate 		WARNING(cpu, 122);
7667c478bd9Sstevel@tonic-gate 		missing++;
7677c478bd9Sstevel@tonic-gate #endif
7687c478bd9Sstevel@tonic-gate 	}
769403c216aSkchow 
770403c216aSkchow #if defined(OPTERON_ERRATUM_123)
7717c478bd9Sstevel@tonic-gate 	/*LINTED*/
7727c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 123) > 0) {
7737c478bd9Sstevel@tonic-gate 		/*
7747c478bd9Sstevel@tonic-gate 		 * Bypassed Reads May Cause Data Corruption of System Hang in
7757c478bd9Sstevel@tonic-gate 		 * Dual Core Processors
7767c478bd9Sstevel@tonic-gate 		 */
7777c478bd9Sstevel@tonic-gate 		/*
7787c478bd9Sstevel@tonic-gate 		 * Erratum 123 applies only to multi-core cpus.
7797c478bd9Sstevel@tonic-gate 		 */
7807c478bd9Sstevel@tonic-gate 
7817c478bd9Sstevel@tonic-gate 		if (cpuid_get_ncpu_per_chip(cpu) > 1) {
7827c478bd9Sstevel@tonic-gate 			/* workaround is to print a warning to upgrade BIOS */
7830ac7d7d8Skucharsk 			if (rdmsr(MSR_AMD_PATCHLEVEL) == 0)
7847c478bd9Sstevel@tonic-gate 				opteron_erratum_123++;
7857c478bd9Sstevel@tonic-gate 		}
7867c478bd9Sstevel@tonic-gate 	}
787403c216aSkchow #endif
7882201b277Skucharsk 
7892201b277Skucharsk #if defined(OPTERON_ERRATUM_131)
7902201b277Skucharsk 	/*LINTED*/
7912201b277Skucharsk 	if (cpuid_opteron_erratum(cpu, 131) > 0) {
7922201b277Skucharsk 		/*
7932201b277Skucharsk 		 * Multiprocessor Systems with Four or More Cores May Deadlock
7942201b277Skucharsk 		 * Waiting for a Probe Response
7952201b277Skucharsk 		 */
7962201b277Skucharsk 		/*
7972201b277Skucharsk 		 * Erratum 131 applies to any system with four or more cores.
7982201b277Skucharsk 		 */
7992201b277Skucharsk 		if ((opteron_erratum_131 == 0) && ((lgrp_plat_node_cnt *
8002201b277Skucharsk 		    cpuid_get_ncpu_per_chip(cpu)) >= 4)) {
801cb9f16ebSkchow 			uint64_t nbcfg;
802cb9f16ebSkchow 			uint64_t wabits;
803cb9f16ebSkchow 
8042201b277Skucharsk 			/*
805cb9f16ebSkchow 			 * Print a warning if neither of the workarounds
806cb9f16ebSkchow 			 * for Erratum 131 is present.
8072201b277Skucharsk 			 */
808cb9f16ebSkchow 
809cb9f16ebSkchow 			wabits = AMD_NB_CFG_SRQ_HEARTBEAT |
810cb9f16ebSkchow 			    AMD_NB_CFG_SRQ_SPR;
811cb9f16ebSkchow 
812cb9f16ebSkchow 			nbcfg = rdmsr(MSR_AMD_NB_CFG);
813cb9f16ebSkchow 			if ((nbcfg & wabits) == 0) {
8142201b277Skucharsk 				opteron_erratum_131++;
815cb9f16ebSkchow 			} else {
816cb9f16ebSkchow 				/* cannot have both workarounds set */
817cb9f16ebSkchow 				ASSERT((nbcfg & wabits) != wabits);
818cb9f16ebSkchow 			}
8192201b277Skucharsk 		}
820ef50d8c0Sesaxe 	}
8212201b277Skucharsk #endif
822ef50d8c0Sesaxe 
823ef50d8c0Sesaxe #if defined(OPTERON_WORKAROUND_6336786)
824ef50d8c0Sesaxe 	/*
825ef50d8c0Sesaxe 	 * This isn't really erratum, but for convenience the
826ef50d8c0Sesaxe 	 * detection/workaround code lives here and in cpuid_opteron_erratum.
827ef50d8c0Sesaxe 	 */
828ef50d8c0Sesaxe 	if (cpuid_opteron_erratum(cpu, 6336786) > 0) {
829ef50d8c0Sesaxe 		int	node;
830ef50d8c0Sesaxe 		uint8_t data;
831ef50d8c0Sesaxe 
832ef50d8c0Sesaxe 		/*
833ef50d8c0Sesaxe 		 * Disable C1-Clock ramping on multi-core/multi-processor
834ef50d8c0Sesaxe 		 * K8 platforms to guard against TSC drift.
835ef50d8c0Sesaxe 		 */
836ef50d8c0Sesaxe 		if (opteron_workaround_6336786) {
837ef50d8c0Sesaxe 			opteron_workaround_6336786++;
838ef50d8c0Sesaxe 		} else if ((lgrp_plat_node_cnt *
839ef50d8c0Sesaxe 		    cpuid_get_ncpu_per_chip(cpu) >= 2) ||
840ef50d8c0Sesaxe 		    opteron_workaround_6336786_UP) {
841ef50d8c0Sesaxe 			for (node = 0; node < lgrp_plat_node_cnt; node++) {
842ef50d8c0Sesaxe 				/*
843ef50d8c0Sesaxe 				 * Clear PMM7[1:0] (function 3, offset 0x87)
844ef50d8c0Sesaxe 				 * Northbridge device is the node id + 24.
845ef50d8c0Sesaxe 				 */
846ef50d8c0Sesaxe 				data = pci_getb_func(0, node + 24, 3, 0x87);
847ef50d8c0Sesaxe 				data &= 0xFC;
848ef50d8c0Sesaxe 				pci_putb_func(0, node + 24, 3, 0x87, data);
849ef50d8c0Sesaxe 			}
850ef50d8c0Sesaxe 			opteron_workaround_6336786++;
851ef50d8c0Sesaxe 		}
8522201b277Skucharsk 	}
853ef50d8c0Sesaxe #endif
854ee88d2b9Skchow 
855ee88d2b9Skchow #if defined(OPTERON_WORKAROUND_6323525)
856ee88d2b9Skchow 	/*LINTED*/
857ee88d2b9Skchow 	/*
858ee88d2b9Skchow 	 * Mutex primitives don't work as expected.
859ee88d2b9Skchow 	 */
860ee88d2b9Skchow 	if (cpuid_opteron_erratum(cpu, 6323525) > 0) {
861ee88d2b9Skchow 
862ee88d2b9Skchow 		/*
863ee88d2b9Skchow 		 * problem only occurs with 2 or more cores. If bit in
864ee88d2b9Skchow 		 * MSR_BU_CFG set, then not applicable. The workaround
865ee88d2b9Skchow 		 * is to patch the semaphone routines with the lfence
866ee88d2b9Skchow 		 * instruction to provide necessary load memory barrier with
867ee88d2b9Skchow 		 * possible subsequent read-modify-write ops.
868ee88d2b9Skchow 		 *
869ee88d2b9Skchow 		 * It is too early in boot to call the patch routine so
870ee88d2b9Skchow 		 * set erratum variable to be done in startup_end().
871ee88d2b9Skchow 		 */
872ee88d2b9Skchow 		if (opteron_workaround_6323525) {
873ee88d2b9Skchow 			opteron_workaround_6323525++;
874ee88d2b9Skchow 		} else if ((x86_feature & X86_SSE2) && ((lgrp_plat_node_cnt *
875ee88d2b9Skchow 		    cpuid_get_ncpu_per_chip(cpu)) >= 2)) {
876ee88d2b9Skchow 			if ((xrdmsr(MSR_BU_CFG) & 0x02) == 0)
877ee88d2b9Skchow 				opteron_workaround_6323525++;
878ee88d2b9Skchow 		}
879ee88d2b9Skchow 	}
880ee88d2b9Skchow #endif
8817c478bd9Sstevel@tonic-gate 	return (missing);
8827c478bd9Sstevel@tonic-gate }
8837c478bd9Sstevel@tonic-gate 
8847c478bd9Sstevel@tonic-gate void
8857c478bd9Sstevel@tonic-gate workaround_errata_end()
8867c478bd9Sstevel@tonic-gate {
8877c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109)
8887c478bd9Sstevel@tonic-gate 	if (opteron_erratum_109) {
8892201b277Skucharsk 		cmn_err(CE_WARN,
8902201b277Skucharsk 		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
8912201b277Skucharsk 		    " processor\nerratum 109 was not detected; updating your"
8922201b277Skucharsk 		    " system's BIOS to a version\ncontaining this"
8932201b277Skucharsk 		    " microcode patch is HIGHLY recommended or erroneous"
8942201b277Skucharsk 		    " system\noperation may occur.\n");
8957c478bd9Sstevel@tonic-gate 	}
8962201b277Skucharsk #endif	/* OPTERON_ERRATUM_109 */
8977c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_123)
8987c478bd9Sstevel@tonic-gate 	if (opteron_erratum_123) {
8992201b277Skucharsk 		cmn_err(CE_WARN,
9002201b277Skucharsk 		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
9012201b277Skucharsk 		    " processor\nerratum 123 was not detected; updating your"
9022201b277Skucharsk 		    " system's BIOS to a version\ncontaining this"
9032201b277Skucharsk 		    " microcode patch is HIGHLY recommended or erroneous"
9042201b277Skucharsk 		    " system\noperation may occur.\n");
9057c478bd9Sstevel@tonic-gate 	}
9062201b277Skucharsk #endif	/* OPTERON_ERRATUM_123 */
9072201b277Skucharsk #if defined(OPTERON_ERRATUM_131)
9082201b277Skucharsk 	if (opteron_erratum_131) {
9092201b277Skucharsk 		cmn_err(CE_WARN,
9102201b277Skucharsk 		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
9112201b277Skucharsk 		    " processor\nerratum 131 was not detected; updating your"
9122201b277Skucharsk 		    " system's BIOS to a version\ncontaining this"
9132201b277Skucharsk 		    " microcode patch is HIGHLY recommended or erroneous"
9142201b277Skucharsk 		    " system\noperation may occur.\n");
9152201b277Skucharsk 	}
9162201b277Skucharsk #endif	/* OPTERON_ERRATUM_131 */
9177c478bd9Sstevel@tonic-gate }
9187c478bd9Sstevel@tonic-gate 
9197c478bd9Sstevel@tonic-gate static ushort_t *mp_map_warm_reset_vector();
9207c478bd9Sstevel@tonic-gate static void mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector);
9217c478bd9Sstevel@tonic-gate 
92241791439Sandrei static cpuset_t procset = 1;
92341791439Sandrei 
9247c478bd9Sstevel@tonic-gate /*ARGSUSED*/
9257c478bd9Sstevel@tonic-gate void
9267c478bd9Sstevel@tonic-gate start_other_cpus(int cprboot)
9277c478bd9Sstevel@tonic-gate {
92841791439Sandrei 	unsigned int who;
92941791439Sandrei 	int skipped = 0;
930d90554ebSdmick 	int cpuid = 0;
9317c478bd9Sstevel@tonic-gate 	int delays = 0;
9327c478bd9Sstevel@tonic-gate 	int started_cpu;
9337c478bd9Sstevel@tonic-gate 	ushort_t *warm_reset_vector = NULL;
9347c478bd9Sstevel@tonic-gate 
9357c478bd9Sstevel@tonic-gate 	/*
9367c478bd9Sstevel@tonic-gate 	 * Initialize our own cpu_info.
9377c478bd9Sstevel@tonic-gate 	 */
9387c478bd9Sstevel@tonic-gate 	init_cpu_info(CPU);
9397c478bd9Sstevel@tonic-gate 
9407c478bd9Sstevel@tonic-gate 	/*
9417c478bd9Sstevel@tonic-gate 	 * Initialize our syscall handlers
9427c478bd9Sstevel@tonic-gate 	 */
9437c478bd9Sstevel@tonic-gate 	init_cpu_syscall(CPU);
9447c478bd9Sstevel@tonic-gate 
9457c478bd9Sstevel@tonic-gate 	/*
9467c478bd9Sstevel@tonic-gate 	 * if only 1 cpu or not using MP, skip the rest of this
9477c478bd9Sstevel@tonic-gate 	 */
94841791439Sandrei 	if (CPUSET_ISEQUAL(mp_cpus, cpu_ready_set) || use_mp == 0) {
9497c478bd9Sstevel@tonic-gate 		if (use_mp == 0)
9507c478bd9Sstevel@tonic-gate 			cmn_err(CE_CONT, "?***** Not in MP mode\n");
9517c478bd9Sstevel@tonic-gate 		goto done;
9527c478bd9Sstevel@tonic-gate 	}
9537c478bd9Sstevel@tonic-gate 
9547c478bd9Sstevel@tonic-gate 	/*
9557c478bd9Sstevel@tonic-gate 	 * perform such initialization as is needed
9567c478bd9Sstevel@tonic-gate 	 * to be able to take CPUs on- and off-line.
9577c478bd9Sstevel@tonic-gate 	 */
9587c478bd9Sstevel@tonic-gate 	cpu_pause_init();
9597c478bd9Sstevel@tonic-gate 
9607c478bd9Sstevel@tonic-gate 	xc_init();		/* initialize processor crosscalls */
9617c478bd9Sstevel@tonic-gate 
9627c478bd9Sstevel@tonic-gate 	/*
9637c478bd9Sstevel@tonic-gate 	 * Copy the real mode code at "real_mode_start" to the
9647c478bd9Sstevel@tonic-gate 	 * page at rm_platter_va.
9657c478bd9Sstevel@tonic-gate 	 */
9667c478bd9Sstevel@tonic-gate 	warm_reset_vector = mp_map_warm_reset_vector();
9677c478bd9Sstevel@tonic-gate 	if (warm_reset_vector == NULL)
9687c478bd9Sstevel@tonic-gate 		goto done;
9697c478bd9Sstevel@tonic-gate 
9707c478bd9Sstevel@tonic-gate 	bcopy((caddr_t)real_mode_start,
9717c478bd9Sstevel@tonic-gate 	    (caddr_t)((rm_platter_t *)rm_platter_va)->rm_code,
9727c478bd9Sstevel@tonic-gate 	    (size_t)real_mode_end - (size_t)real_mode_start);
9737c478bd9Sstevel@tonic-gate 
9747c478bd9Sstevel@tonic-gate 	flushes_require_xcalls = 1;
9757c478bd9Sstevel@tonic-gate 
9765205ae23Snf 	ASSERT(CPU_IN_SET(procset, cpuid));
9775205ae23Snf 	ASSERT(CPU_IN_SET(cpu_ready_set, cpuid));
9785205ae23Snf 
9795205ae23Snf 	/*
9805205ae23Snf 	 * We lock our affinity to the master CPU to ensure that all slave CPUs
9815205ae23Snf 	 * do their TSC syncs with the same CPU.
9825205ae23Snf 	 */
9837c478bd9Sstevel@tonic-gate 	affinity_set(CPU_CURRENT);
9847c478bd9Sstevel@tonic-gate 
9857c478bd9Sstevel@tonic-gate 	for (who = 0; who < NCPU; who++) {
9867c478bd9Sstevel@tonic-gate 		if (who == cpuid)
9877c478bd9Sstevel@tonic-gate 			continue;
9885205ae23Snf 
9895205ae23Snf 		delays = 0;
9905205ae23Snf 
99141791439Sandrei 		if (!CPU_IN_SET(mp_cpus, who))
99241791439Sandrei 			continue;
9937c478bd9Sstevel@tonic-gate 
99441791439Sandrei 		if (ncpus >= max_ncpus) {
99541791439Sandrei 			skipped = who;
9967c478bd9Sstevel@tonic-gate 			continue;
99741791439Sandrei 		}
9987c478bd9Sstevel@tonic-gate 
9997c478bd9Sstevel@tonic-gate 		mp_startup_init(who);
10007c478bd9Sstevel@tonic-gate 		started_cpu = 1;
10017c478bd9Sstevel@tonic-gate 		(*cpu_startf)(who, rm_platter_pa);
10027c478bd9Sstevel@tonic-gate 
100341791439Sandrei 		while (!CPU_IN_SET(procset, who)) {
10047c478bd9Sstevel@tonic-gate 			delay(1);
10057c478bd9Sstevel@tonic-gate 			if (++delays > (20 * hz)) {
10067c478bd9Sstevel@tonic-gate 
10077c478bd9Sstevel@tonic-gate 				cmn_err(CE_WARN,
10087c478bd9Sstevel@tonic-gate 				    "cpu%d failed to start", who);
10097c478bd9Sstevel@tonic-gate 
10107c478bd9Sstevel@tonic-gate 				mutex_enter(&cpu_lock);
10117c478bd9Sstevel@tonic-gate 				cpu[who]->cpu_flags = 0;
1012affbd3ccSkchow 				cpu_vm_data_destroy(cpu[who]);
10137c478bd9Sstevel@tonic-gate 				cpu_del_unit(who);
10147c478bd9Sstevel@tonic-gate 				mutex_exit(&cpu_lock);
10157c478bd9Sstevel@tonic-gate 
10167c478bd9Sstevel@tonic-gate 				started_cpu = 0;
10177c478bd9Sstevel@tonic-gate 				break;
10187c478bd9Sstevel@tonic-gate 			}
10197c478bd9Sstevel@tonic-gate 		}
10207c478bd9Sstevel@tonic-gate 		if (!started_cpu)
10217c478bd9Sstevel@tonic-gate 			continue;
10227c478bd9Sstevel@tonic-gate 		if (tsc_gethrtime_enable)
10237c478bd9Sstevel@tonic-gate 			tsc_sync_master(who);
10247c478bd9Sstevel@tonic-gate 
10257c478bd9Sstevel@tonic-gate 	}
10267c478bd9Sstevel@tonic-gate 
10277c478bd9Sstevel@tonic-gate 	affinity_clear();
10287c478bd9Sstevel@tonic-gate 
10295205ae23Snf 	/*
10305205ae23Snf 	 * Wait for all CPUs that booted (have presence in procset)
10315205ae23Snf 	 * to come online (have presence in cpu_ready_set).  Note
10325205ae23Snf 	 * that the start CPU already satisfies both of these, so no
10335205ae23Snf 	 * special case is needed.
10345205ae23Snf 	 */
10357c478bd9Sstevel@tonic-gate 	for (who = 0; who < NCPU; who++) {
103641791439Sandrei 		if (!CPU_IN_SET(procset, who))
10377c478bd9Sstevel@tonic-gate 			continue;
10387c478bd9Sstevel@tonic-gate 
103941791439Sandrei 		while (!CPU_IN_SET(cpu_ready_set, who))
10407c478bd9Sstevel@tonic-gate 			delay(1);
10417c478bd9Sstevel@tonic-gate 	}
10427c478bd9Sstevel@tonic-gate 
104341791439Sandrei 	if (skipped) {
104441791439Sandrei 		cmn_err(CE_NOTE,
104541791439Sandrei 		    "System detected %d CPU(s), but "
104641791439Sandrei 		    "only %d CPU(s) were enabled during boot.",
104741791439Sandrei 		    skipped + 1, ncpus);
104841791439Sandrei 		cmn_err(CE_NOTE,
104941791439Sandrei 		    "Use \"boot-ncpus\" parameter to enable more CPU(s). "
105041791439Sandrei 		    "See eeprom(1M).");
105141791439Sandrei 	}
105241791439Sandrei 
10537c478bd9Sstevel@tonic-gate done:
10547c478bd9Sstevel@tonic-gate 	workaround_errata_end();
10557c478bd9Sstevel@tonic-gate 
10567c478bd9Sstevel@tonic-gate 	if (warm_reset_vector != NULL)
10577c478bd9Sstevel@tonic-gate 		mp_unmap_warm_reset_vector(warm_reset_vector);
10587c478bd9Sstevel@tonic-gate 	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
10597c478bd9Sstevel@tonic-gate 	    HAT_UNLOAD);
10603ad553a7Sgavinm 
10613ad553a7Sgavinm 	cmi_post_mpstartup();
10627c478bd9Sstevel@tonic-gate }
10637c478bd9Sstevel@tonic-gate 
10647c478bd9Sstevel@tonic-gate /*
10657c478bd9Sstevel@tonic-gate  * Dummy functions - no i86pc platforms support dynamic cpu allocation.
10667c478bd9Sstevel@tonic-gate  */
10677c478bd9Sstevel@tonic-gate /*ARGSUSED*/
10687c478bd9Sstevel@tonic-gate int
10697c478bd9Sstevel@tonic-gate mp_cpu_configure(int cpuid)
10707c478bd9Sstevel@tonic-gate {
10717c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
10727c478bd9Sstevel@tonic-gate }
10737c478bd9Sstevel@tonic-gate 
10747c478bd9Sstevel@tonic-gate /*ARGSUSED*/
10757c478bd9Sstevel@tonic-gate int
10767c478bd9Sstevel@tonic-gate mp_cpu_unconfigure(int cpuid)
10777c478bd9Sstevel@tonic-gate {
10787c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
10797c478bd9Sstevel@tonic-gate }
10807c478bd9Sstevel@tonic-gate 
10817c478bd9Sstevel@tonic-gate /*
10827c478bd9Sstevel@tonic-gate  * Startup function for 'other' CPUs (besides boot cpu).
1083498697c5Sdmick  * Called from real_mode_start.
1084b4b46911Skchow  *
1085b4b46911Skchow  * WARNING: until CPU_READY is set, mp_startup and routines called by
1086b4b46911Skchow  * mp_startup should not call routines (e.g. kmem_free) that could call
1087b4b46911Skchow  * hat_unload which requires CPU_READY to be set.
10887c478bd9Sstevel@tonic-gate  */
10897c478bd9Sstevel@tonic-gate void
10907c478bd9Sstevel@tonic-gate mp_startup(void)
10917c478bd9Sstevel@tonic-gate {
10927c478bd9Sstevel@tonic-gate 	struct cpu *cp = CPU;
10937c478bd9Sstevel@tonic-gate 	uint_t new_x86_feature;
10947c478bd9Sstevel@tonic-gate 
109524a74e86Sdmick 	/*
109624a74e86Sdmick 	 * We need to get TSC on this proc synced (i.e., any delta
109724a74e86Sdmick 	 * from cpu0 accounted for) as soon as we can, because many
109824a74e86Sdmick 	 * many things use gethrtime/pc_gethrestime, including
109924a74e86Sdmick 	 * interrupts, cmn_err, etc.
110024a74e86Sdmick 	 */
110124a74e86Sdmick 
110224a74e86Sdmick 	/* Let cpu0 continue into tsc_sync_master() */
110324a74e86Sdmick 	CPUSET_ATOMIC_ADD(procset, cp->cpu_id);
110424a74e86Sdmick 
110524a74e86Sdmick 	if (tsc_gethrtime_enable)
110624a74e86Sdmick 		tsc_sync_slave();
110724a74e86Sdmick 
1108498697c5Sdmick 	/*
1109498697c5Sdmick 	 * Once this was done from assembly, but it's safer here; if
1110498697c5Sdmick 	 * it blocks, we need to be able to swtch() to and from, and
1111498697c5Sdmick 	 * since we get here by calling t_pc, we need to do that call
1112498697c5Sdmick 	 * before swtch() overwrites it.
1113498697c5Sdmick 	 */
1114498697c5Sdmick 
1115498697c5Sdmick 	(void) (*ap_mlsetup)();
1116498697c5Sdmick 
11177c478bd9Sstevel@tonic-gate 	new_x86_feature = cpuid_pass1(cp);
11187c478bd9Sstevel@tonic-gate 
11197c478bd9Sstevel@tonic-gate 	/*
11207c478bd9Sstevel@tonic-gate 	 * We need to Sync MTRR with cpu0's MTRR. We have to do
11217c478bd9Sstevel@tonic-gate 	 * this with interrupts disabled.
11227c478bd9Sstevel@tonic-gate 	 */
11237c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_MTRR)
11247c478bd9Sstevel@tonic-gate 		mtrr_sync();
11257c478bd9Sstevel@tonic-gate 
11267c478bd9Sstevel@tonic-gate 	/*
11277c478bd9Sstevel@tonic-gate 	 * Initialize this CPU's syscall handlers
11287c478bd9Sstevel@tonic-gate 	 */
11297c478bd9Sstevel@tonic-gate 	init_cpu_syscall(cp);
11307c478bd9Sstevel@tonic-gate 
11317c478bd9Sstevel@tonic-gate 	/*
11327c478bd9Sstevel@tonic-gate 	 * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the
11337c478bd9Sstevel@tonic-gate 	 * highest level at which a routine is permitted to block on
11347c478bd9Sstevel@tonic-gate 	 * an adaptive mutex (allows for cpu poke interrupt in case
11357c478bd9Sstevel@tonic-gate 	 * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks
11367c478bd9Sstevel@tonic-gate 	 * device interrupts that may end up in the hat layer issuing cross
11377c478bd9Sstevel@tonic-gate 	 * calls before CPU_READY is set.
11387c478bd9Sstevel@tonic-gate 	 */
11397c478bd9Sstevel@tonic-gate 	(void) splx(ipltospl(LOCK_LEVEL));
11407c478bd9Sstevel@tonic-gate 
11417c478bd9Sstevel@tonic-gate 	/*
11427c478bd9Sstevel@tonic-gate 	 * Do a sanity check to make sure this new CPU is a sane thing
11437c478bd9Sstevel@tonic-gate 	 * to add to the collection of processors running this system.
11447c478bd9Sstevel@tonic-gate 	 *
11457c478bd9Sstevel@tonic-gate 	 * XXX	Clearly this needs to get more sophisticated, if x86
11467c478bd9Sstevel@tonic-gate 	 * systems start to get built out of heterogenous CPUs; as is
11477c478bd9Sstevel@tonic-gate 	 * likely to happen once the number of processors in a configuration
11487c478bd9Sstevel@tonic-gate 	 * gets large enough.
11497c478bd9Sstevel@tonic-gate 	 */
11507c478bd9Sstevel@tonic-gate 	if ((x86_feature & new_x86_feature) != x86_feature) {
11517c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT, "?cpu%d: %b\n",
11527c478bd9Sstevel@tonic-gate 		    cp->cpu_id, new_x86_feature, FMT_X86_FEATURE);
11537c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
11547c478bd9Sstevel@tonic-gate 	}
11557c478bd9Sstevel@tonic-gate 
11567c478bd9Sstevel@tonic-gate 	/*
11577c478bd9Sstevel@tonic-gate 	 * We could be more sophisticated here, and just mark the CPU
11587c478bd9Sstevel@tonic-gate 	 * as "faulted" but at this point we'll opt for the easier
11597c478bd9Sstevel@tonic-gate 	 * answer of dieing horribly.  Provided the boot cpu is ok,
11607c478bd9Sstevel@tonic-gate 	 * the system can be recovered by booting with use_mp set to zero.
11617c478bd9Sstevel@tonic-gate 	 */
11627c478bd9Sstevel@tonic-gate 	if (workaround_errata(cp) != 0)
11637c478bd9Sstevel@tonic-gate 		panic("critical workaround(s) missing for cpu%d", cp->cpu_id);
11647c478bd9Sstevel@tonic-gate 
11657c478bd9Sstevel@tonic-gate 	cpuid_pass2(cp);
11667c478bd9Sstevel@tonic-gate 	cpuid_pass3(cp);
11677c478bd9Sstevel@tonic-gate 	(void) cpuid_pass4(cp);
11687c478bd9Sstevel@tonic-gate 
11697c478bd9Sstevel@tonic-gate 	init_cpu_info(cp);
11707c478bd9Sstevel@tonic-gate 
11717c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
11727c478bd9Sstevel@tonic-gate 	/*
1173*fb2f18f8Sesaxe 	 * Processor group initialization for this CPU is dependent on the
1174*fb2f18f8Sesaxe 	 * cpuid probing, which must be done in the context of the current
1175*fb2f18f8Sesaxe 	 * CPU.
11767c478bd9Sstevel@tonic-gate 	 */
1177*fb2f18f8Sesaxe 	pghw_physid_create(cp);
1178*fb2f18f8Sesaxe 	pg_cpu_init(cp);
1179*fb2f18f8Sesaxe 	pg_cmt_cpu_startup(cp);
11807c478bd9Sstevel@tonic-gate 
11817c478bd9Sstevel@tonic-gate 	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_ENABLE | CPU_EXISTS;
11827c478bd9Sstevel@tonic-gate 	cpu_add_active(cp);
11835205ae23Snf 
11845205ae23Snf 	if (dtrace_cpu_init != NULL) {
11855205ae23Snf 		(*dtrace_cpu_init)(cp->cpu_id);
11865205ae23Snf 	}
11875205ae23Snf 
11887c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
11897c478bd9Sstevel@tonic-gate 
1190aa7b6435Ssethg 	/*
1191aa7b6435Ssethg 	 * Enable preemption here so that contention for any locks acquired
1192aa7b6435Ssethg 	 * later in mp_startup may be preempted if the thread owning those
1193aa7b6435Ssethg 	 * locks is continously executing on other CPUs (for example, this
1194aa7b6435Ssethg 	 * CPU must be preemptible to allow other CPUs to pause it during their
1195aa7b6435Ssethg 	 * startup phases).  It's safe to enable preemption here because the
1196aa7b6435Ssethg 	 * CPU state is pretty-much fully constructed.
1197aa7b6435Ssethg 	 */
1198aa7b6435Ssethg 	curthread->t_preempt = 0;
1199aa7b6435Ssethg 
1200b4b46911Skchow 	add_cpunode2devtree(cp->cpu_id, cp->cpu_m.mcpu_cpi);
1201b4b46911Skchow 
1202da43ceabSsethg 	/* The base spl should still be at LOCK LEVEL here */
1203da43ceabSsethg 	ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
1204da43ceabSsethg 	set_base_spl();		/* Restore the spl to its proper value */
1205da43ceabSsethg 
12067c478bd9Sstevel@tonic-gate 	(void) spl0();				/* enable interrupts */
12077c478bd9Sstevel@tonic-gate 
12087aec1d6eScindi 	/*
12097aec1d6eScindi 	 * Set up the CPU module for this CPU.  This can't be done before
12107aec1d6eScindi 	 * this CPU is made CPU_READY, because we may (in heterogeneous systems)
12117aec1d6eScindi 	 * need to go load another CPU module.  The act of attempting to load
12127aec1d6eScindi 	 * a module may trigger a cross-call, which will ASSERT unless this
12137aec1d6eScindi 	 * cpu is CPU_READY.
12147aec1d6eScindi 	 */
12157aec1d6eScindi 	cmi_init();
12167aec1d6eScindi 
12177aec1d6eScindi 	if (x86_feature & X86_MCA)
12187aec1d6eScindi 		cmi_mca_init();
12197aec1d6eScindi 
12207c478bd9Sstevel@tonic-gate 	if (boothowto & RB_DEBUG)
12217c478bd9Sstevel@tonic-gate 		kdi_dvec_cpu_init(cp);
12227c478bd9Sstevel@tonic-gate 
12237c478bd9Sstevel@tonic-gate 	/*
12247c478bd9Sstevel@tonic-gate 	 * Setting the bit in cpu_ready_set must be the last operation in
12257c478bd9Sstevel@tonic-gate 	 * processor initialization; the boot CPU will continue to boot once
12267c478bd9Sstevel@tonic-gate 	 * it sees this bit set for all active CPUs.
12277c478bd9Sstevel@tonic-gate 	 */
12287c478bd9Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);
12297c478bd9Sstevel@tonic-gate 
12307c478bd9Sstevel@tonic-gate 	/*
12317c478bd9Sstevel@tonic-gate 	 * Because mp_startup() gets fired off after init() starts, we
12327c478bd9Sstevel@tonic-gate 	 * can't use the '?' trick to do 'boot -v' printing - so we
12337c478bd9Sstevel@tonic-gate 	 * always direct the 'cpu .. online' messages to the log.
12347c478bd9Sstevel@tonic-gate 	 */
12357c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
12367c478bd9Sstevel@tonic-gate 	    cp->cpu_id);
12377c478bd9Sstevel@tonic-gate 
12387c478bd9Sstevel@tonic-gate 	/*
12397c478bd9Sstevel@tonic-gate 	 * Now we are done with the startup thread, so free it up.
12407c478bd9Sstevel@tonic-gate 	 */
12417c478bd9Sstevel@tonic-gate 	thread_exit();
12427c478bd9Sstevel@tonic-gate 	panic("mp_startup: cannot return");
12437c478bd9Sstevel@tonic-gate 	/*NOTREACHED*/
12447c478bd9Sstevel@tonic-gate }
12457c478bd9Sstevel@tonic-gate 
12467c478bd9Sstevel@tonic-gate 
12477c478bd9Sstevel@tonic-gate /*
12487c478bd9Sstevel@tonic-gate  * Start CPU on user request.
12497c478bd9Sstevel@tonic-gate  */
12507c478bd9Sstevel@tonic-gate /* ARGSUSED */
12517c478bd9Sstevel@tonic-gate int
12527c478bd9Sstevel@tonic-gate mp_cpu_start(struct cpu *cp)
12537c478bd9Sstevel@tonic-gate {
12547c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12557c478bd9Sstevel@tonic-gate 	return (0);
12567c478bd9Sstevel@tonic-gate }
12577c478bd9Sstevel@tonic-gate 
12587c478bd9Sstevel@tonic-gate /*
12597c478bd9Sstevel@tonic-gate  * Stop CPU on user request.
12607c478bd9Sstevel@tonic-gate  */
12617c478bd9Sstevel@tonic-gate /* ARGSUSED */
12627c478bd9Sstevel@tonic-gate int
12637c478bd9Sstevel@tonic-gate mp_cpu_stop(struct cpu *cp)
12647c478bd9Sstevel@tonic-gate {
1265d90554ebSdmick 	extern int cbe_psm_timer_mode;
12667c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1267d90554ebSdmick 
1268d90554ebSdmick 	/*
1269d90554ebSdmick 	 * If TIMER_PERIODIC mode is used, CPU0 is the one running it;
1270d90554ebSdmick 	 * can't stop it.  (This is true only for machines with no TSC.)
1271d90554ebSdmick 	 */
1272d90554ebSdmick 
1273d90554ebSdmick 	if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
1274d90554ebSdmick 		return (1);
12757c478bd9Sstevel@tonic-gate 
12767c478bd9Sstevel@tonic-gate 	return (0);
12777c478bd9Sstevel@tonic-gate }
12787c478bd9Sstevel@tonic-gate 
12797c478bd9Sstevel@tonic-gate /*
12807c478bd9Sstevel@tonic-gate  * Power on CPU.
12817c478bd9Sstevel@tonic-gate  */
12827c478bd9Sstevel@tonic-gate /* ARGSUSED */
12837c478bd9Sstevel@tonic-gate int
12847c478bd9Sstevel@tonic-gate mp_cpu_poweron(struct cpu *cp)
12857c478bd9Sstevel@tonic-gate {
12867c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12877c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
12887c478bd9Sstevel@tonic-gate }
12897c478bd9Sstevel@tonic-gate 
12907c478bd9Sstevel@tonic-gate /*
12917c478bd9Sstevel@tonic-gate  * Power off CPU.
12927c478bd9Sstevel@tonic-gate  */
12937c478bd9Sstevel@tonic-gate /* ARGSUSED */
12947c478bd9Sstevel@tonic-gate int
12957c478bd9Sstevel@tonic-gate mp_cpu_poweroff(struct cpu *cp)
12967c478bd9Sstevel@tonic-gate {
12977c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12987c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
12997c478bd9Sstevel@tonic-gate }
13007c478bd9Sstevel@tonic-gate 
13017c478bd9Sstevel@tonic-gate 
13027c478bd9Sstevel@tonic-gate /*
13037c478bd9Sstevel@tonic-gate  * Take the specified CPU out of participation in interrupts.
13047c478bd9Sstevel@tonic-gate  */
13057c478bd9Sstevel@tonic-gate int
13067c478bd9Sstevel@tonic-gate cpu_disable_intr(struct cpu *cp)
13077c478bd9Sstevel@tonic-gate {
13087c478bd9Sstevel@tonic-gate 	if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
13097c478bd9Sstevel@tonic-gate 		return (EBUSY);
13107c478bd9Sstevel@tonic-gate 
13117c478bd9Sstevel@tonic-gate 	cp->cpu_flags &= ~CPU_ENABLE;
13127c478bd9Sstevel@tonic-gate 	return (0);
13137c478bd9Sstevel@tonic-gate }
13147c478bd9Sstevel@tonic-gate 
13157c478bd9Sstevel@tonic-gate /*
13167c478bd9Sstevel@tonic-gate  * Allow the specified CPU to participate in interrupts.
13177c478bd9Sstevel@tonic-gate  */
13187c478bd9Sstevel@tonic-gate void
13197c478bd9Sstevel@tonic-gate cpu_enable_intr(struct cpu *cp)
13207c478bd9Sstevel@tonic-gate {
13217c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
13227c478bd9Sstevel@tonic-gate 	cp->cpu_flags |= CPU_ENABLE;
13237c478bd9Sstevel@tonic-gate 	psm_enable_intr(cp->cpu_id);
13247c478bd9Sstevel@tonic-gate }
13257c478bd9Sstevel@tonic-gate 
13267c478bd9Sstevel@tonic-gate 
13277c478bd9Sstevel@tonic-gate 
13287c478bd9Sstevel@tonic-gate static ushort_t *
13297c478bd9Sstevel@tonic-gate mp_map_warm_reset_vector()
13307c478bd9Sstevel@tonic-gate {
13317c478bd9Sstevel@tonic-gate 	ushort_t *warm_reset_vector;
13327c478bd9Sstevel@tonic-gate 
13337c478bd9Sstevel@tonic-gate 	if (!(warm_reset_vector = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
13347c478bd9Sstevel@tonic-gate 	    sizeof (ushort_t *), PROT_READ|PROT_WRITE)))
13357c478bd9Sstevel@tonic-gate 		return (NULL);
13367c478bd9Sstevel@tonic-gate 
13377c478bd9Sstevel@tonic-gate 	/*
13387c478bd9Sstevel@tonic-gate 	 * setup secondary cpu bios boot up vector
13397c478bd9Sstevel@tonic-gate 	 */
13407c478bd9Sstevel@tonic-gate 	*warm_reset_vector = (ushort_t)((caddr_t)
13417c478bd9Sstevel@tonic-gate 		((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va
13427c478bd9Sstevel@tonic-gate 		+ ((ulong_t)rm_platter_va & 0xf));
13437c478bd9Sstevel@tonic-gate 	warm_reset_vector++;
13447c478bd9Sstevel@tonic-gate 	*warm_reset_vector = (ushort_t)(rm_platter_pa >> 4);
13457c478bd9Sstevel@tonic-gate 
13467c478bd9Sstevel@tonic-gate 	--warm_reset_vector;
13477c478bd9Sstevel@tonic-gate 	return (warm_reset_vector);
13487c478bd9Sstevel@tonic-gate }
13497c478bd9Sstevel@tonic-gate 
13507c478bd9Sstevel@tonic-gate static void
13517c478bd9Sstevel@tonic-gate mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector)
13527c478bd9Sstevel@tonic-gate {
13537c478bd9Sstevel@tonic-gate 	psm_unmap_phys((caddr_t)warm_reset_vector, sizeof (ushort_t *));
13547c478bd9Sstevel@tonic-gate }
13557c478bd9Sstevel@tonic-gate 
13567c478bd9Sstevel@tonic-gate void
13577c478bd9Sstevel@tonic-gate mp_cpu_faulted_enter(struct cpu *cp)
13587aec1d6eScindi {
13597aec1d6eScindi 	cmi_faulted_enter(cp);
13607aec1d6eScindi }
13617c478bd9Sstevel@tonic-gate 
13627c478bd9Sstevel@tonic-gate void
13637c478bd9Sstevel@tonic-gate mp_cpu_faulted_exit(struct cpu *cp)
13647aec1d6eScindi {
13657aec1d6eScindi 	cmi_faulted_exit(cp);
13667aec1d6eScindi }
13677c478bd9Sstevel@tonic-gate 
13687c478bd9Sstevel@tonic-gate /*
13697c478bd9Sstevel@tonic-gate  * The following two routines are used as context operators on threads belonging
13707c478bd9Sstevel@tonic-gate  * to processes with a private LDT (see sysi86).  Due to the rarity of such
13717c478bd9Sstevel@tonic-gate  * processes, these routines are currently written for best code readability and
13727c478bd9Sstevel@tonic-gate  * organization rather than speed.  We could avoid checking x86_feature at every
13737c478bd9Sstevel@tonic-gate  * context switch by installing different context ops, depending on the
13747c478bd9Sstevel@tonic-gate  * x86_feature flags, at LDT creation time -- one for each combination of fast
13757c478bd9Sstevel@tonic-gate  * syscall feature flags.
13767c478bd9Sstevel@tonic-gate  */
13777c478bd9Sstevel@tonic-gate 
13787c478bd9Sstevel@tonic-gate /*ARGSUSED*/
13797c478bd9Sstevel@tonic-gate void
13807c478bd9Sstevel@tonic-gate cpu_fast_syscall_disable(void *arg)
13817c478bd9Sstevel@tonic-gate {
13827c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_SEP)
13837c478bd9Sstevel@tonic-gate 		cpu_sep_disable();
13847c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_ASYSC)
13857c478bd9Sstevel@tonic-gate 		cpu_asysc_disable();
13867c478bd9Sstevel@tonic-gate }
13877c478bd9Sstevel@tonic-gate 
13887c478bd9Sstevel@tonic-gate /*ARGSUSED*/
13897c478bd9Sstevel@tonic-gate void
13907c478bd9Sstevel@tonic-gate cpu_fast_syscall_enable(void *arg)
13917c478bd9Sstevel@tonic-gate {
13927c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_SEP)
13937c478bd9Sstevel@tonic-gate 		cpu_sep_enable();
13947c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_ASYSC)
13957c478bd9Sstevel@tonic-gate 		cpu_asysc_enable();
13967c478bd9Sstevel@tonic-gate }
13977c478bd9Sstevel@tonic-gate 
13987c478bd9Sstevel@tonic-gate static void
13997c478bd9Sstevel@tonic-gate cpu_sep_enable(void)
14007c478bd9Sstevel@tonic-gate {
14017c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_SEP);
14027c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
14037c478bd9Sstevel@tonic-gate 
14040ac7d7d8Skucharsk 	wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
14057c478bd9Sstevel@tonic-gate }
14067c478bd9Sstevel@tonic-gate 
14077c478bd9Sstevel@tonic-gate static void
14087c478bd9Sstevel@tonic-gate cpu_sep_disable(void)
14097c478bd9Sstevel@tonic-gate {
14107c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_SEP);
14117c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
14127c478bd9Sstevel@tonic-gate 
14137c478bd9Sstevel@tonic-gate 	/*
14147c478bd9Sstevel@tonic-gate 	 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
14157c478bd9Sstevel@tonic-gate 	 * the sysenter or sysexit instruction to trigger a #gp fault.
14167c478bd9Sstevel@tonic-gate 	 */
14170ac7d7d8Skucharsk 	wrmsr(MSR_INTC_SEP_CS, 0ULL);
14187c478bd9Sstevel@tonic-gate }
14197c478bd9Sstevel@tonic-gate 
14207c478bd9Sstevel@tonic-gate static void
14217c478bd9Sstevel@tonic-gate cpu_asysc_enable(void)
14227c478bd9Sstevel@tonic-gate {
14237c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_ASYSC);
14247c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
14257c478bd9Sstevel@tonic-gate 
14260ac7d7d8Skucharsk 	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
14270ac7d7d8Skucharsk 	    (uint64_t)(uintptr_t)AMD_EFER_SCE);
14287c478bd9Sstevel@tonic-gate }
14297c478bd9Sstevel@tonic-gate 
14307c478bd9Sstevel@tonic-gate static void
14317c478bd9Sstevel@tonic-gate cpu_asysc_disable(void)
14327c478bd9Sstevel@tonic-gate {
14337c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_ASYSC);
14347c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
14357c478bd9Sstevel@tonic-gate 
14367c478bd9Sstevel@tonic-gate 	/*
14377c478bd9Sstevel@tonic-gate 	 * Turn off the SCE (syscall enable) bit in the EFER register. Software
14387c478bd9Sstevel@tonic-gate 	 * executing syscall or sysret with this bit off will incur a #ud trap.
14397c478bd9Sstevel@tonic-gate 	 */
14400ac7d7d8Skucharsk 	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) &
14410ac7d7d8Skucharsk 	    ~((uint64_t)(uintptr_t)AMD_EFER_SCE));
14427c478bd9Sstevel@tonic-gate }
1443