xref: /illumos-gate/usr/src/uts/i86pc/os/mp_startup.c (revision 41791439)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5100b72f4Sandrei  * Common Development and Distribution License (the "License").
6100b72f4Sandrei  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22b4b46911Skchow  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate #include <sys/types.h>
297c478bd9Sstevel@tonic-gate #include <sys/thread.h>
307c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
317c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
327c478bd9Sstevel@tonic-gate #include <sys/param.h>
337c478bd9Sstevel@tonic-gate #include <sys/proc.h>
347c478bd9Sstevel@tonic-gate #include <sys/disp.h>
357c478bd9Sstevel@tonic-gate #include <sys/mmu.h>
367c478bd9Sstevel@tonic-gate #include <sys/class.h>
377c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
387c478bd9Sstevel@tonic-gate #include <sys/debug.h>
397c478bd9Sstevel@tonic-gate #include <sys/asm_linkage.h>
407c478bd9Sstevel@tonic-gate #include <sys/x_call.h>
417c478bd9Sstevel@tonic-gate #include <sys/systm.h>
427c478bd9Sstevel@tonic-gate #include <sys/var.h>
437c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
447c478bd9Sstevel@tonic-gate #include <vm/hat.h>
457c478bd9Sstevel@tonic-gate #include <sys/mmu.h>
467c478bd9Sstevel@tonic-gate #include <vm/as.h>
477c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
487c478bd9Sstevel@tonic-gate #include <sys/segments.h>
497c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
507c478bd9Sstevel@tonic-gate #include <sys/stack.h>
517c478bd9Sstevel@tonic-gate #include <sys/smp_impldefs.h>
527c478bd9Sstevel@tonic-gate #include <sys/x86_archext.h>
537c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
547c478bd9Sstevel@tonic-gate #include <sys/traptrace.h>
557c478bd9Sstevel@tonic-gate #include <sys/clock.h>
567c478bd9Sstevel@tonic-gate #include <sys/cpc_impl.h>
577c478bd9Sstevel@tonic-gate #include <sys/chip.h>
587c478bd9Sstevel@tonic-gate #include <sys/dtrace.h>
597c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
607c478bd9Sstevel@tonic-gate #include <sys/fp.h>
617c478bd9Sstevel@tonic-gate #include <sys/reboot.h>
627c478bd9Sstevel@tonic-gate #include <sys/kdi.h>
637c478bd9Sstevel@tonic-gate #include <vm/hat_i86.h>
647c478bd9Sstevel@tonic-gate #include <sys/memnode.h>
65ef50d8c0Sesaxe #include <sys/pci_cfgspace.h>
667aec1d6eScindi #include <sys/cpu_module.h>
677c478bd9Sstevel@tonic-gate 
687c478bd9Sstevel@tonic-gate struct cpu	cpus[1];			/* CPU data */
697c478bd9Sstevel@tonic-gate struct cpu	*cpu[NCPU] = {&cpus[0]};	/* pointers to all CPUs */
707c478bd9Sstevel@tonic-gate cpu_core_t	cpu_core[NCPU];			/* cpu_core structures */
717c478bd9Sstevel@tonic-gate 
727c478bd9Sstevel@tonic-gate /*
737c478bd9Sstevel@tonic-gate  * Useful for disabling MP bring-up for an MP capable kernel
747c478bd9Sstevel@tonic-gate  * (a kernel that was built with MP defined)
757c478bd9Sstevel@tonic-gate  */
767c478bd9Sstevel@tonic-gate int use_mp = 1;
777c478bd9Sstevel@tonic-gate 
78*41791439Sandrei /*
79*41791439Sandrei  * To be set by a PSM to indicate what CPUs are available on the system.
80*41791439Sandrei  */
81*41791439Sandrei cpuset_t mp_cpus = 1;
827c478bd9Sstevel@tonic-gate 
837c478bd9Sstevel@tonic-gate /*
847c478bd9Sstevel@tonic-gate  * This variable is used by the hat layer to decide whether or not
857c478bd9Sstevel@tonic-gate  * critical sections are needed to prevent race conditions.  It is
867c478bd9Sstevel@tonic-gate  * set once enough MP initialization has been done to allow
877c478bd9Sstevel@tonic-gate  * cross calls.
887c478bd9Sstevel@tonic-gate  */
897c478bd9Sstevel@tonic-gate int flushes_require_xcalls = 0;
90*41791439Sandrei cpuset_t	cpu_ready_set = 1;
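/*
 * cpu_ready_set starts out as 1, i.e. with only the boot cpu's bit set;
 * bits for the other cpus are added as each one comes online.
 */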
917c478bd9Sstevel@tonic-gate 
927c478bd9Sstevel@tonic-gate extern	void	real_mode_start(void);
937c478bd9Sstevel@tonic-gate extern	void	real_mode_end(void);
947c478bd9Sstevel@tonic-gate static 	void	mp_startup(void);
957c478bd9Sstevel@tonic-gate 
967c478bd9Sstevel@tonic-gate static void cpu_sep_enable(void);
977c478bd9Sstevel@tonic-gate static void cpu_sep_disable(void);
987c478bd9Sstevel@tonic-gate static void cpu_asysc_enable(void);
997c478bd9Sstevel@tonic-gate static void cpu_asysc_disable(void);
1007c478bd9Sstevel@tonic-gate 
1017c478bd9Sstevel@tonic-gate extern int tsc_gethrtime_enable;
1027c478bd9Sstevel@tonic-gate 
1037c478bd9Sstevel@tonic-gate /*
1047c478bd9Sstevel@tonic-gate  * Init CPU info - get CPU type info for processor_info system call.
1057c478bd9Sstevel@tonic-gate  */
1067c478bd9Sstevel@tonic-gate void
1077c478bd9Sstevel@tonic-gate init_cpu_info(struct cpu *cp)
1087c478bd9Sstevel@tonic-gate {
1097c478bd9Sstevel@tonic-gate 	processor_info_t *pi = &cp->cpu_type_info;
1107c478bd9Sstevel@tonic-gate 	char buf[CPU_IDSTRLEN];
1117c478bd9Sstevel@tonic-gate 
1127c478bd9Sstevel@tonic-gate 	/*
1137c478bd9Sstevel@tonic-gate 	 * Get clock-frequency property for the CPU.
1147c478bd9Sstevel@tonic-gate 	 */
1157c478bd9Sstevel@tonic-gate 	pi->pi_clock = cpu_freq;
1167c478bd9Sstevel@tonic-gate 
1177c478bd9Sstevel@tonic-gate 	(void) strcpy(pi->pi_processor_type, "i386");
1187c478bd9Sstevel@tonic-gate 	if (fpu_exists)
1197c478bd9Sstevel@tonic-gate 		(void) strcpy(pi->pi_fputypes, "i387 compatible");
1207c478bd9Sstevel@tonic-gate 
1217c478bd9Sstevel@tonic-gate 	(void) cpuid_getidstr(cp, buf, sizeof (buf));
1227c478bd9Sstevel@tonic-gate 
1237c478bd9Sstevel@tonic-gate 	cp->cpu_idstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
1247c478bd9Sstevel@tonic-gate 	(void) strcpy(cp->cpu_idstr, buf);
1257c478bd9Sstevel@tonic-gate 
1267c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);
1277c478bd9Sstevel@tonic-gate 
1287c478bd9Sstevel@tonic-gate 	(void) cpuid_getbrandstr(cp, buf, sizeof (buf));
1297c478bd9Sstevel@tonic-gate 	cp->cpu_brandstr = kmem_alloc(strlen(buf) + 1, KM_SLEEP);
1307c478bd9Sstevel@tonic-gate 	(void) strcpy(cp->cpu_brandstr, buf);
1317c478bd9Sstevel@tonic-gate 
1327c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
1337c478bd9Sstevel@tonic-gate }
1347c478bd9Sstevel@tonic-gate 
1357c478bd9Sstevel@tonic-gate /*
1367c478bd9Sstevel@tonic-gate  * Configure syscall support on this CPU.
1377c478bd9Sstevel@tonic-gate  */
1387c478bd9Sstevel@tonic-gate /*ARGSUSED*/
1397c478bd9Sstevel@tonic-gate static void
1407c478bd9Sstevel@tonic-gate init_cpu_syscall(struct cpu *cp)
1417c478bd9Sstevel@tonic-gate {
1427c478bd9Sstevel@tonic-gate 	kpreempt_disable();
1437c478bd9Sstevel@tonic-gate 
1447c478bd9Sstevel@tonic-gate #if defined(__amd64)
1457c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_ASYSC) {
1467c478bd9Sstevel@tonic-gate 
1477c478bd9Sstevel@tonic-gate #if !defined(__lint)
1487c478bd9Sstevel@tonic-gate 		/*
1497c478bd9Sstevel@tonic-gate 		 * The syscall instruction imposes a certain ordering on
1507c478bd9Sstevel@tonic-gate 		 * segment selectors, so we double-check that ordering
1517c478bd9Sstevel@tonic-gate 		 * here.
1527c478bd9Sstevel@tonic-gate 		 */
1537c478bd9Sstevel@tonic-gate 		ASSERT(KDS_SEL == KCS_SEL + 8);
1547c478bd9Sstevel@tonic-gate 		ASSERT(UDS_SEL == U32CS_SEL + 8);
1557c478bd9Sstevel@tonic-gate 		ASSERT(UCS_SEL == U32CS_SEL + 16);
1567c478bd9Sstevel@tonic-gate #endif
1577c478bd9Sstevel@tonic-gate 		/*
1587c478bd9Sstevel@tonic-gate 		 * Turn syscall/sysret extensions on.
1597c478bd9Sstevel@tonic-gate 		 */
1607c478bd9Sstevel@tonic-gate 		cpu_asysc_enable();
1617c478bd9Sstevel@tonic-gate 
1627c478bd9Sstevel@tonic-gate 		/*
1637c478bd9Sstevel@tonic-gate 		 * Program the magic registers ..
1647c478bd9Sstevel@tonic-gate 		 */
1650ac7d7d8Skucharsk 		wrmsr(MSR_AMD_STAR, ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) <<
1660ac7d7d8Skucharsk 		    32);
1670ac7d7d8Skucharsk 		wrmsr(MSR_AMD_LSTAR, (uint64_t)(uintptr_t)sys_syscall);
1680ac7d7d8Skucharsk 		wrmsr(MSR_AMD_CSTAR, (uint64_t)(uintptr_t)sys_syscall32);
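		/*
		 * MSR_AMD_STAR supplies the kernel and user segment selector
		 * bases consumed by syscall/sysret; MSR_AMD_LSTAR and
		 * MSR_AMD_CSTAR hold the 64-bit and 32-bit (compatibility
		 * mode) syscall entry points, respectively.
		 */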
1697c478bd9Sstevel@tonic-gate 
1707c478bd9Sstevel@tonic-gate 		/*
1717c478bd9Sstevel@tonic-gate 		 * This list of flags is masked off the incoming
1727c478bd9Sstevel@tonic-gate 		 * %rfl when we enter the kernel.
1737c478bd9Sstevel@tonic-gate 		 */
1740ac7d7d8Skucharsk 		wrmsr(MSR_AMD_SFMASK, (uint64_t)(uintptr_t)(PS_IE | PS_T));
1757c478bd9Sstevel@tonic-gate 	}
1767c478bd9Sstevel@tonic-gate #endif
1777c478bd9Sstevel@tonic-gate 
1787c478bd9Sstevel@tonic-gate 	/*
1797c478bd9Sstevel@tonic-gate 	 * On 32-bit kernels, we use sysenter/sysexit because it's too
1807c478bd9Sstevel@tonic-gate 	 * hard to use syscall/sysret, and it is more portable anyway.
1817c478bd9Sstevel@tonic-gate 	 *
1827c478bd9Sstevel@tonic-gate 	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
1837c478bd9Sstevel@tonic-gate 	 * variant isn't available to 32-bit applications, but sysenter is.
1847c478bd9Sstevel@tonic-gate 	 */
1857c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_SEP) {
1867c478bd9Sstevel@tonic-gate 
1877c478bd9Sstevel@tonic-gate #if !defined(__lint)
1887c478bd9Sstevel@tonic-gate 		/*
1897c478bd9Sstevel@tonic-gate 		 * The sysenter instruction imposes a certain ordering on
1907c478bd9Sstevel@tonic-gate 		 * segment selectors, so we double-check that ordering
1917c478bd9Sstevel@tonic-gate 		 * here. See "sysenter" in Intel document 245471-012, "IA-32
1927c478bd9Sstevel@tonic-gate 		 * Intel Architecture Software Developer's Manual Volume 2:
1937c478bd9Sstevel@tonic-gate 		 * Instruction Set Reference"
1947c478bd9Sstevel@tonic-gate 		 */
1957c478bd9Sstevel@tonic-gate 		ASSERT(KDS_SEL == KCS_SEL + 8);
1967c478bd9Sstevel@tonic-gate 
1977c478bd9Sstevel@tonic-gate 		ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
1987c478bd9Sstevel@tonic-gate 		ASSERT32(UDS_SEL == UCS_SEL + 8);
1997c478bd9Sstevel@tonic-gate 
2007c478bd9Sstevel@tonic-gate 		ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
2017c478bd9Sstevel@tonic-gate 		ASSERT64(UDS_SEL == U32CS_SEL + 8);
2027c478bd9Sstevel@tonic-gate #endif
2037c478bd9Sstevel@tonic-gate 
2047c478bd9Sstevel@tonic-gate 		cpu_sep_enable();
2057c478bd9Sstevel@tonic-gate 
2067c478bd9Sstevel@tonic-gate 		/*
2077c478bd9Sstevel@tonic-gate 		 * resume() sets this value to the base of the thread's stack
2087c478bd9Sstevel@tonic-gate 		 * via a context handler.
2097c478bd9Sstevel@tonic-gate 		 */
2100ac7d7d8Skucharsk 		wrmsr(MSR_INTC_SEP_ESP, 0ULL);
2110ac7d7d8Skucharsk 		wrmsr(MSR_INTC_SEP_EIP, (uint64_t)(uintptr_t)sys_sysenter);
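		/*
		 * On kernel entry, sysenter loads %eip from MSR_INTC_SEP_EIP
		 * and %esp from MSR_INTC_SEP_ESP, which is why resume() must
		 * keep the stack MSR current for the running thread.
		 */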
2127c478bd9Sstevel@tonic-gate 	}
2137c478bd9Sstevel@tonic-gate 
2147c478bd9Sstevel@tonic-gate 	kpreempt_enable();
2157c478bd9Sstevel@tonic-gate }
2167c478bd9Sstevel@tonic-gate 
2177c478bd9Sstevel@tonic-gate /*
2187c478bd9Sstevel@tonic-gate  * Multiprocessor initialization.
2197c478bd9Sstevel@tonic-gate  *
2207c478bd9Sstevel@tonic-gate  * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
2217c478bd9Sstevel@tonic-gate  * startup and idle threads for the specified CPU.
2227c478bd9Sstevel@tonic-gate  */
2237c478bd9Sstevel@tonic-gate static void
2247c478bd9Sstevel@tonic-gate mp_startup_init(int cpun)
2257c478bd9Sstevel@tonic-gate {
2267c478bd9Sstevel@tonic-gate #if defined(__amd64)
2277c478bd9Sstevel@tonic-gate extern void *long_mode_64(void);
2287c478bd9Sstevel@tonic-gate #endif	/* __amd64 */
2297c478bd9Sstevel@tonic-gate 
2307c478bd9Sstevel@tonic-gate 	struct cpu *cp;
2317c478bd9Sstevel@tonic-gate 	struct tss *ntss;
2327c478bd9Sstevel@tonic-gate 	kthread_id_t tp;
2337c478bd9Sstevel@tonic-gate 	caddr_t	sp;
2347c478bd9Sstevel@tonic-gate 	int size;
2357c478bd9Sstevel@tonic-gate 	proc_t *procp;
2367c478bd9Sstevel@tonic-gate 	extern void idle();
2377c478bd9Sstevel@tonic-gate 
2387c478bd9Sstevel@tonic-gate 	struct cpu_tables *tablesp;
2397c478bd9Sstevel@tonic-gate 	rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va;
2407c478bd9Sstevel@tonic-gate 
2417c478bd9Sstevel@tonic-gate #ifdef TRAPTRACE
2427c478bd9Sstevel@tonic-gate 	trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
2437c478bd9Sstevel@tonic-gate #endif
2447c478bd9Sstevel@tonic-gate 
2457c478bd9Sstevel@tonic-gate 	ASSERT(cpun < NCPU && cpu[cpun] == NULL);
2467c478bd9Sstevel@tonic-gate 
2477c478bd9Sstevel@tonic-gate 	if ((cp = kmem_zalloc(sizeof (*cp), KM_NOSLEEP)) == NULL) {
2487c478bd9Sstevel@tonic-gate 		panic("mp_startup_init: cpu%d: "
2497c478bd9Sstevel@tonic-gate 		    "no memory for cpu structure", cpun);
2507c478bd9Sstevel@tonic-gate 		/*NOTREACHED*/
2517c478bd9Sstevel@tonic-gate 	}
2527c478bd9Sstevel@tonic-gate 	procp = curthread->t_procp;
2537c478bd9Sstevel@tonic-gate 
2547c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
2557c478bd9Sstevel@tonic-gate 	/*
2567c478bd9Sstevel@tonic-gate 	 * Initialize the dispatcher first.
2577c478bd9Sstevel@tonic-gate 	 */
2587c478bd9Sstevel@tonic-gate 	disp_cpu_init(cp);
2597c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
2607c478bd9Sstevel@tonic-gate 
261affbd3ccSkchow 	cpu_vm_data_init(cp);
262affbd3ccSkchow 
2637c478bd9Sstevel@tonic-gate 	/*
2647c478bd9Sstevel@tonic-gate 	 * Allocate and initialize the startup thread for this CPU.
2657c478bd9Sstevel@tonic-gate 	 * Interrupt and process switch stacks get allocated later
2667c478bd9Sstevel@tonic-gate 	 * when the CPU starts running.
2677c478bd9Sstevel@tonic-gate 	 */
2687c478bd9Sstevel@tonic-gate 	tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
2697c478bd9Sstevel@tonic-gate 	    TS_STOPPED, maxclsyspri);
2707c478bd9Sstevel@tonic-gate 
2717c478bd9Sstevel@tonic-gate 	/*
2727c478bd9Sstevel@tonic-gate 	 * Set state to TS_ONPROC since this thread will start running
2737c478bd9Sstevel@tonic-gate 	 * as soon as the CPU comes online.
2747c478bd9Sstevel@tonic-gate 	 *
2757c478bd9Sstevel@tonic-gate 	 * All the other fields of the thread structure are set up by
2767c478bd9Sstevel@tonic-gate 	 * thread_create().
2777c478bd9Sstevel@tonic-gate 	 */
2787c478bd9Sstevel@tonic-gate 	THREAD_ONPROC(tp, cp);
2797c478bd9Sstevel@tonic-gate 	tp->t_preempt = 1;
2807c478bd9Sstevel@tonic-gate 	tp->t_bound_cpu = cp;
2817c478bd9Sstevel@tonic-gate 	tp->t_affinitycnt = 1;
2827c478bd9Sstevel@tonic-gate 	tp->t_cpu = cp;
2837c478bd9Sstevel@tonic-gate 	tp->t_disp_queue = cp->cpu_disp;
2847c478bd9Sstevel@tonic-gate 
2857c478bd9Sstevel@tonic-gate 	/*
2867c478bd9Sstevel@tonic-gate 	 * Set up the thread to start in mp_startup().
2877c478bd9Sstevel@tonic-gate 	 */
2887c478bd9Sstevel@tonic-gate 	sp = tp->t_stk;
2897c478bd9Sstevel@tonic-gate 	tp->t_pc = (uintptr_t)mp_startup;
2907c478bd9Sstevel@tonic-gate 	tp->t_sp = (uintptr_t)(sp - MINFRAME);
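	/*
	 * The saved pc/sp make the thread resume in mp_startup() with a
	 * minimal frame (MINFRAME) reserved at the top of its stack.
	 */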
2917c478bd9Sstevel@tonic-gate 
2927c478bd9Sstevel@tonic-gate 	cp->cpu_id = cpun;
2937c478bd9Sstevel@tonic-gate 	cp->cpu_self = cp;
2947c478bd9Sstevel@tonic-gate 	cp->cpu_thread = tp;
2957c478bd9Sstevel@tonic-gate 	cp->cpu_lwp = NULL;
2967c478bd9Sstevel@tonic-gate 	cp->cpu_dispthread = tp;
2977c478bd9Sstevel@tonic-gate 	cp->cpu_dispatch_pri = DISP_PRIO(tp);
2987c478bd9Sstevel@tonic-gate 
299da43ceabSsethg 	/*
300da43ceabSsethg 	 * cpu_base_spl must be set explicitly here to prevent any blocking
301da43ceabSsethg 	 * operations in mp_startup from causing the spl of the cpu to drop
302da43ceabSsethg 	 * to 0 (allowing device interrupts before we're ready) in resume().
303da43ceabSsethg 	 * cpu_base_spl MUST remain at LOCK_LEVEL until the cpu is CPU_READY.
304da43ceabSsethg 	 * As an extra bit of security on DEBUG kernels, this is enforced with
305da43ceabSsethg 	 * an assertion in mp_startup() -- before cpu_base_spl is set to its
306da43ceabSsethg 	 * proper value.
307da43ceabSsethg 	 */
308da43ceabSsethg 	cp->cpu_base_spl = ipltospl(LOCK_LEVEL);
309da43ceabSsethg 
3107c478bd9Sstevel@tonic-gate 	/*
3117c478bd9Sstevel@tonic-gate 	 * Now, initialize per-CPU idle thread for this CPU.
3127c478bd9Sstevel@tonic-gate 	 */
3137c478bd9Sstevel@tonic-gate 	tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);
3147c478bd9Sstevel@tonic-gate 
3157c478bd9Sstevel@tonic-gate 	cp->cpu_idle_thread = tp;
3167c478bd9Sstevel@tonic-gate 
3177c478bd9Sstevel@tonic-gate 	tp->t_preempt = 1;
3187c478bd9Sstevel@tonic-gate 	tp->t_bound_cpu = cp;
3197c478bd9Sstevel@tonic-gate 	tp->t_affinitycnt = 1;
3207c478bd9Sstevel@tonic-gate 	tp->t_cpu = cp;
3217c478bd9Sstevel@tonic-gate 	tp->t_disp_queue = cp->cpu_disp;
3227c478bd9Sstevel@tonic-gate 
323394b433dSesaxe 	/*
324394b433dSesaxe 	 * Bootstrap the CPU for CMT-aware scheduling.
325394b433dSesaxe 	 * The rest of the initialization will happen from
326394b433dSesaxe 	 * mp_startup().
327394b433dSesaxe 	 */
328394b433dSesaxe 	chip_bootstrap_cpu(cp);
329394b433dSesaxe 
3307c478bd9Sstevel@tonic-gate 	/*
3317c478bd9Sstevel@tonic-gate 	 * Perform CPC initialization on the new CPU.
3327c478bd9Sstevel@tonic-gate 	 */
3337c478bd9Sstevel@tonic-gate 	kcpc_hw_init(cp);
3347c478bd9Sstevel@tonic-gate 
3357c478bd9Sstevel@tonic-gate 	/*
3367c478bd9Sstevel@tonic-gate 	 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
3377c478bd9Sstevel@tonic-gate 	 * for each CPU.
3387c478bd9Sstevel@tonic-gate 	 */
3397c478bd9Sstevel@tonic-gate 
3407c478bd9Sstevel@tonic-gate 	setup_vaddr_for_ppcopy(cp);
3417c478bd9Sstevel@tonic-gate 
3427c478bd9Sstevel@tonic-gate 	/*
3437c478bd9Sstevel@tonic-gate 	 * Allocate space for page directory, stack, tss, gdt and idt.
3447c478bd9Sstevel@tonic-gate 	 * This assumes that kmem_alloc will return memory which is aligned
3457c478bd9Sstevel@tonic-gate 	 * to the next higher power of 2 or a page (if size > MAXABIG).
3467c478bd9Sstevel@tonic-gate 	 * If this assumption is ever broken by a change in the kmem
3477c478bd9Sstevel@tonic-gate 	 * allocator, things may not work, as the page directory has to be
3487c478bd9Sstevel@tonic-gate 	 * page aligned.
3497c478bd9Sstevel@tonic-gate 	 */
3507c478bd9Sstevel@tonic-gate 	if ((tablesp = kmem_zalloc(sizeof (*tablesp), KM_NOSLEEP)) == NULL)
3517c478bd9Sstevel@tonic-gate 		panic("mp_startup_init: cpu%d cannot allocate tables", cpun);
3527c478bd9Sstevel@tonic-gate 
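	/*
	 * If the allocation is not page aligned, throw it back, over-allocate
	 * by a page, and round the pointer up to the next page boundary so
	 * that the page directory within the tables is page aligned.
	 */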
3537c478bd9Sstevel@tonic-gate 	if ((uintptr_t)tablesp & ~MMU_STD_PAGEMASK) {
3547c478bd9Sstevel@tonic-gate 		kmem_free(tablesp, sizeof (struct cpu_tables));
3557c478bd9Sstevel@tonic-gate 		size = sizeof (struct cpu_tables) + MMU_STD_PAGESIZE;
3567c478bd9Sstevel@tonic-gate 		tablesp = kmem_zalloc(size, KM_NOSLEEP);
3577c478bd9Sstevel@tonic-gate 		tablesp = (struct cpu_tables *)
3587c478bd9Sstevel@tonic-gate 		    (((uintptr_t)tablesp + MMU_STD_PAGESIZE) &
3597c478bd9Sstevel@tonic-gate 		    MMU_STD_PAGEMASK);
3607c478bd9Sstevel@tonic-gate 	}
3617c478bd9Sstevel@tonic-gate 
3627c478bd9Sstevel@tonic-gate 	ntss = cp->cpu_tss = &tablesp->ct_tss;
3635f9a4ecdSrab 
3645f9a4ecdSrab 	if ((tablesp->ct_gdt = kmem_zalloc(PAGESIZE, KM_NOSLEEP)) == NULL)
3655f9a4ecdSrab 		panic("mp_startup_init: cpu%d cannot allocate GDT", cpun);
3667c478bd9Sstevel@tonic-gate 	cp->cpu_gdt = tablesp->ct_gdt;
3677c478bd9Sstevel@tonic-gate 	bcopy(CPU->cpu_gdt, cp->cpu_gdt, NGDT * (sizeof (user_desc_t)));
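	/*
	 * The new cpu starts from a copy of the boot cpu's GDT; per-cpu
	 * entries (the kernel TSS, and %gs on 32-bit kernels) are filled
	 * in below.
	 */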
3687c478bd9Sstevel@tonic-gate 
3697c478bd9Sstevel@tonic-gate #if defined(__amd64)
3707c478bd9Sstevel@tonic-gate 
3717c478bd9Sstevel@tonic-gate 	/*
3727c478bd9Sstevel@tonic-gate 	 * #DF (double fault).
3737c478bd9Sstevel@tonic-gate 	 */
3747c478bd9Sstevel@tonic-gate 	ntss->tss_ist1 =
3757c478bd9Sstevel@tonic-gate 	    (uint64_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)];
3767c478bd9Sstevel@tonic-gate 
3777c478bd9Sstevel@tonic-gate #elif defined(__i386)
3787c478bd9Sstevel@tonic-gate 
3797c478bd9Sstevel@tonic-gate 	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
3807c478bd9Sstevel@tonic-gate 	    (uint32_t)&tablesp->ct_stack[sizeof (tablesp->ct_stack)];
3817c478bd9Sstevel@tonic-gate 
3827c478bd9Sstevel@tonic-gate 	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;
3837c478bd9Sstevel@tonic-gate 
3847c478bd9Sstevel@tonic-gate 	ntss->tss_eip = (uint32_t)mp_startup;
3857c478bd9Sstevel@tonic-gate 
3867c478bd9Sstevel@tonic-gate 	ntss->tss_cs = KCS_SEL;
3877c478bd9Sstevel@tonic-gate 	ntss->tss_fs = KFS_SEL;
3887c478bd9Sstevel@tonic-gate 	ntss->tss_gs = KGS_SEL;
3897c478bd9Sstevel@tonic-gate 
3907c478bd9Sstevel@tonic-gate 	/*
3917c478bd9Sstevel@tonic-gate 	 * Set up kernel %gs.
3927c478bd9Sstevel@tonic-gate 	 */
3937c478bd9Sstevel@tonic-gate 	set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
3947c478bd9Sstevel@tonic-gate 	    SEL_KPL, 0, 1);
3957c478bd9Sstevel@tonic-gate 
3967c478bd9Sstevel@tonic-gate #endif	/* __i386 */
3977c478bd9Sstevel@tonic-gate 
3987c478bd9Sstevel@tonic-gate 	/*
3997c478bd9Sstevel@tonic-gate 	 * Set I/O bit map offset equal to size of TSS segment limit
4007c478bd9Sstevel@tonic-gate 	 * for no I/O permission map. This will cause all user I/O
4017c478bd9Sstevel@tonic-gate 	 * instructions to generate a #gp fault.
4027c478bd9Sstevel@tonic-gate 	 */
4037c478bd9Sstevel@tonic-gate 	ntss->tss_bitmapbase = sizeof (*ntss);
4047c478bd9Sstevel@tonic-gate 
4057c478bd9Sstevel@tonic-gate 	/*
4067c478bd9Sstevel@tonic-gate 	 * Set up the kernel TSS.
4077c478bd9Sstevel@tonic-gate 	 */
4087c478bd9Sstevel@tonic-gate 	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
4097c478bd9Sstevel@tonic-gate 	    sizeof (*cp->cpu_tss) -1, SDT_SYSTSS, SEL_KPL);
4107c478bd9Sstevel@tonic-gate 
4117c478bd9Sstevel@tonic-gate 	/*
4127c478bd9Sstevel@tonic-gate 	 * If we have more than one node, each cpu gets a copy of the IDT
4137c478bd9Sstevel@tonic-gate 	 * local to its node. If this is a Pentium box, we use cpu 0's
4147c478bd9Sstevel@tonic-gate 	 * IDT. cpu 0's IDT has been made read-only to work around the
4157c478bd9Sstevel@tonic-gate 	 * cmpxchgl register bug.
4167c478bd9Sstevel@tonic-gate 	 */
4177c478bd9Sstevel@tonic-gate 	cp->cpu_idt = CPU->cpu_idt;
4187c478bd9Sstevel@tonic-gate 	if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) {
4197c478bd9Sstevel@tonic-gate 		cp->cpu_idt = kmem_alloc(sizeof (idt0), KM_SLEEP);
4207c478bd9Sstevel@tonic-gate 		bcopy(idt0, cp->cpu_idt, sizeof (idt0));
4217c478bd9Sstevel@tonic-gate 	}
4227c478bd9Sstevel@tonic-gate 
4237c478bd9Sstevel@tonic-gate 	/*
4247c478bd9Sstevel@tonic-gate 	 * Get interrupt priority data from cpu 0
4257c478bd9Sstevel@tonic-gate 	 */
4267c478bd9Sstevel@tonic-gate 	cp->cpu_pri_data = CPU->cpu_pri_data;
4277c478bd9Sstevel@tonic-gate 
4287c478bd9Sstevel@tonic-gate 	hat_cpu_online(cp);
4297c478bd9Sstevel@tonic-gate 
4307c478bd9Sstevel@tonic-gate 	/* Should remove all entries for the current process/thread here */
4317c478bd9Sstevel@tonic-gate 
4327c478bd9Sstevel@tonic-gate 	/*
4337c478bd9Sstevel@tonic-gate 	 * Fill up the real mode platter to make it easy for real mode code to
4347c478bd9Sstevel@tonic-gate 	 * kick it off. This area should really be one passed by boot to the
4357c478bd9Sstevel@tonic-gate 	 * kernel, guaranteed to be below 1MB and aligned to 16 bytes. It should
4367c478bd9Sstevel@tonic-gate 	 * also have identical physical and virtual addresses in paged mode.
4377c478bd9Sstevel@tonic-gate 	 */
4387c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_idt_base = cp->cpu_idt;
4397c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_idt_lim = sizeof (idt0) - 1;
4407c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_gdt_base = cp->cpu_gdt;
4417c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_gdt_lim = sizeof (gdt0) -1;
4427c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_pdbr = getcr3();
4437c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_cpu = cpun;
4447c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_x86feature = x86_feature;
4457c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_cr4 = cr4_value;
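	/*
	 * At this point the platter carries the descriptor table bases and
	 * limits, the page directory base (%cr3), the target cpu id, and the
	 * feature and %cr4 state the new cpu should replicate.
	 */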
4467c478bd9Sstevel@tonic-gate 
4477c478bd9Sstevel@tonic-gate #if defined(__amd64)
4487c478bd9Sstevel@tonic-gate 	if (getcr3() > 0xffffffffUL)
4497c478bd9Sstevel@tonic-gate 		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
4507c478bd9Sstevel@tonic-gate 			"located above 4G in physical memory (@ 0x%llx).",
4517c478bd9Sstevel@tonic-gate 			(unsigned long long)getcr3());
4527c478bd9Sstevel@tonic-gate 
4537c478bd9Sstevel@tonic-gate 	/*
4547c478bd9Sstevel@tonic-gate 	 * Set up pseudo-descriptors for the temporary GDT and IDT for use ONLY
4557c478bd9Sstevel@tonic-gate 	 * by code in real_mode_start():
4567c478bd9Sstevel@tonic-gate 	 *
4577c478bd9Sstevel@tonic-gate 	 * GDT[0]:  NULL selector
4587c478bd9Sstevel@tonic-gate 	 * GDT[1]:  64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
4597c478bd9Sstevel@tonic-gate 	 *
4607c478bd9Sstevel@tonic-gate 	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
4617c478bd9Sstevel@tonic-gate 	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
4627c478bd9Sstevel@tonic-gate 	 * a course of action as any other, though it may cause the entire
4637c478bd9Sstevel@tonic-gate 	 * platform to reset in some cases...
4647c478bd9Sstevel@tonic-gate 	 */
4657c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt[0] = 0ULL;
4667c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;
4677c478bd9Sstevel@tonic-gate 
4687c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt_lim = (ushort_t)
4697c478bd9Sstevel@tonic-gate 	    (sizeof (real_mode_platter->rm_temp_gdt) - 1);
4707c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_gdt_base = rm_platter_pa +
4717c478bd9Sstevel@tonic-gate 	    (uint32_t)(&((rm_platter_t *)0)->rm_temp_gdt);
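	/*
	 * The &((rm_platter_t *)0)->rm_temp_gdt expression is a hand-rolled
	 * offsetof(): the temporary GDT's physical base is the platter's
	 * physical address plus the member's offset within the platter.
	 */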
4727c478bd9Sstevel@tonic-gate 
4737c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_idt_lim = 0;
4747c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_temp_idt_base = 0;
4757c478bd9Sstevel@tonic-gate 
4767c478bd9Sstevel@tonic-gate 	/*
4777c478bd9Sstevel@tonic-gate 	 * Since the CPU needs to jump to protected mode using an identity
4787c478bd9Sstevel@tonic-gate 	 * mapped address, we need to calculate it here.
4797c478bd9Sstevel@tonic-gate 	 */
4807c478bd9Sstevel@tonic-gate 	real_mode_platter->rm_longmode64_addr = rm_platter_pa +
4817c478bd9Sstevel@tonic-gate 	    ((uint32_t)long_mode_64 - (uint32_t)real_mode_start);
4827c478bd9Sstevel@tonic-gate #endif	/* __amd64 */
4837c478bd9Sstevel@tonic-gate 
4847c478bd9Sstevel@tonic-gate #ifdef TRAPTRACE
4857c478bd9Sstevel@tonic-gate 	/*
4867c478bd9Sstevel@tonic-gate 	 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers for this
4877c478bd9Sstevel@tonic-gate 	 * CPU.
4887c478bd9Sstevel@tonic-gate 	 */
4897c478bd9Sstevel@tonic-gate 	ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
4907c478bd9Sstevel@tonic-gate 	ttc->ttc_next = ttc->ttc_first;
4917c478bd9Sstevel@tonic-gate 	ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
4927c478bd9Sstevel@tonic-gate #endif
4937c478bd9Sstevel@tonic-gate 
4947c478bd9Sstevel@tonic-gate 	/*
4957c478bd9Sstevel@tonic-gate 	 * Record that we have another CPU.
4967c478bd9Sstevel@tonic-gate 	 */
4977c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
4987c478bd9Sstevel@tonic-gate 	/*
4997c478bd9Sstevel@tonic-gate 	 * Initialize the interrupt threads for this CPU
5007c478bd9Sstevel@tonic-gate 	 */
501100b72f4Sandrei 	cpu_intr_alloc(cp, NINTR_THREADS);
5027c478bd9Sstevel@tonic-gate 	/*
5037c478bd9Sstevel@tonic-gate 	 * Add CPU to list of available CPUs.  It'll be on the active list
5047c478bd9Sstevel@tonic-gate 	 * after mp_startup().
5057c478bd9Sstevel@tonic-gate 	 */
5067c478bd9Sstevel@tonic-gate 	cpu_add_unit(cp);
5077c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
5087c478bd9Sstevel@tonic-gate }
5097c478bd9Sstevel@tonic-gate 
5107c478bd9Sstevel@tonic-gate /*
5117c478bd9Sstevel@tonic-gate  * Apply workarounds for known errata, and warn about those that are absent.
5127c478bd9Sstevel@tonic-gate  *
5137c478bd9Sstevel@tonic-gate  * System vendors occasionally create configurations which contain different
5147c478bd9Sstevel@tonic-gate  * revisions of the CPUs that are almost but not exactly the same.  At the
5157c478bd9Sstevel@tonic-gate  * time of writing, this meant that their clock rates were the same, their
5167c478bd9Sstevel@tonic-gate  * feature sets were the same, but the required workarounds were -not-
5177c478bd9Sstevel@tonic-gate  * necessarily the same.  So, this routine is invoked on -every- CPU soon
5187c478bd9Sstevel@tonic-gate  * after starting to make sure that the resulting system contains the most
5197c478bd9Sstevel@tonic-gate  * pessimal set of workarounds needed to cope with *any* of the CPUs in the
5207c478bd9Sstevel@tonic-gate  * system.
5217c478bd9Sstevel@tonic-gate  *
522ef50d8c0Sesaxe  * workaround_errata is invoked early in mlsetup() for CPU 0, and in
523ef50d8c0Sesaxe  * mp_startup() for all slave CPUs. Slaves process workaround_errata prior
524ef50d8c0Sesaxe  * to acknowledging their readiness to the master, so this routine will
525ef50d8c0Sesaxe  * never be executed by multiple CPUs in parallel, thus making updates to
526ef50d8c0Sesaxe  * global data safe.
527ef50d8c0Sesaxe  *
5282201b277Skucharsk  * These workarounds are based on Rev 3.57 of the Revision Guide for
5292201b277Skucharsk  * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, August 2005.
5307c478bd9Sstevel@tonic-gate  */
5317c478bd9Sstevel@tonic-gate 
5327c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91)
5337c478bd9Sstevel@tonic-gate int opteron_erratum_91;		/* if non-zero -> at least one cpu has it */
5347c478bd9Sstevel@tonic-gate #endif
5357c478bd9Sstevel@tonic-gate 
5367c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93)
5377c478bd9Sstevel@tonic-gate int opteron_erratum_93;		/* if non-zero -> at least one cpu has it */
5387c478bd9Sstevel@tonic-gate #endif
5397c478bd9Sstevel@tonic-gate 
5407c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_100)
5417c478bd9Sstevel@tonic-gate int opteron_erratum_100;	/* if non-zero -> at least one cpu has it */
5427c478bd9Sstevel@tonic-gate #endif
5437c478bd9Sstevel@tonic-gate 
5447c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109)
5457c478bd9Sstevel@tonic-gate int opteron_erratum_109;	/* if non-zero -> at least one cpu has it */
5467c478bd9Sstevel@tonic-gate #endif
5477c478bd9Sstevel@tonic-gate 
5487c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121)
5497c478bd9Sstevel@tonic-gate int opteron_erratum_121;	/* if non-zero -> at least one cpu has it */
5507c478bd9Sstevel@tonic-gate #endif
5517c478bd9Sstevel@tonic-gate 
5527c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_122)
5537c478bd9Sstevel@tonic-gate int opteron_erratum_122;	/* if non-zero -> at least one cpu has it */
5547c478bd9Sstevel@tonic-gate #endif
5557c478bd9Sstevel@tonic-gate 
5567c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_123)
5577c478bd9Sstevel@tonic-gate int opteron_erratum_123;	/* if non-zero -> at least one cpu has it */
5587c478bd9Sstevel@tonic-gate #endif
5597c478bd9Sstevel@tonic-gate 
5602201b277Skucharsk #if defined(OPTERON_ERRATUM_131)
5612201b277Skucharsk int opteron_erratum_131;	/* if non-zero -> at least one cpu has it */
5622201b277Skucharsk #endif
5637c478bd9Sstevel@tonic-gate 
564ef50d8c0Sesaxe #if defined(OPTERON_WORKAROUND_6336786)
565ef50d8c0Sesaxe int opteron_workaround_6336786;	/* non-zero -> WA relevant and applied */
566ef50d8c0Sesaxe int opteron_workaround_6336786_UP = 0;	/* Not needed for UP */
567ef50d8c0Sesaxe #endif
568ef50d8c0Sesaxe 
569ee88d2b9Skchow #if defined(OPTERON_WORKAROUND_6323525)
570ee88d2b9Skchow int opteron_workaround_6323525;	/* if non-zero -> at least one cpu has it */
571ee88d2b9Skchow #endif
572ee88d2b9Skchow 
5737c478bd9Sstevel@tonic-gate #define	WARNING(cpu, n)						\
5747c478bd9Sstevel@tonic-gate 	cmn_err(CE_WARN, "cpu%d: no workaround for erratum %d",	\
5757c478bd9Sstevel@tonic-gate 	    (cpu)->cpu_id, (n))
5767c478bd9Sstevel@tonic-gate 
5777c478bd9Sstevel@tonic-gate uint_t
5787c478bd9Sstevel@tonic-gate workaround_errata(struct cpu *cpu)
5797c478bd9Sstevel@tonic-gate {
5807c478bd9Sstevel@tonic-gate 	uint_t missing = 0;
5817c478bd9Sstevel@tonic-gate 
5827c478bd9Sstevel@tonic-gate 	ASSERT(cpu == CPU);
5837c478bd9Sstevel@tonic-gate 
5847c478bd9Sstevel@tonic-gate 	/*LINTED*/
5857c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 88) > 0) {
5867c478bd9Sstevel@tonic-gate 		/*
5877c478bd9Sstevel@tonic-gate 		 * SWAPGS May Fail To Read Correct GS Base
5887c478bd9Sstevel@tonic-gate 		 */
5897c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_88)
5907c478bd9Sstevel@tonic-gate 		/*
5917c478bd9Sstevel@tonic-gate 		 * The workaround is an mfence in the relevant assembler code
5927c478bd9Sstevel@tonic-gate 		 */
5937c478bd9Sstevel@tonic-gate #else
5947c478bd9Sstevel@tonic-gate 		WARNING(cpu, 88);
5957c478bd9Sstevel@tonic-gate 		missing++;
5967c478bd9Sstevel@tonic-gate #endif
5977c478bd9Sstevel@tonic-gate 	}
5987c478bd9Sstevel@tonic-gate 
5997c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 91) > 0) {
6007c478bd9Sstevel@tonic-gate 		/*
6017c478bd9Sstevel@tonic-gate 		 * Software Prefetches May Report A Page Fault
6027c478bd9Sstevel@tonic-gate 		 */
6037c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_91)
6047c478bd9Sstevel@tonic-gate 		/*
6057c478bd9Sstevel@tonic-gate 		 * fix is in trap.c
6067c478bd9Sstevel@tonic-gate 		 */
6077c478bd9Sstevel@tonic-gate 		opteron_erratum_91++;
6087c478bd9Sstevel@tonic-gate #else
6097c478bd9Sstevel@tonic-gate 		WARNING(cpu, 91);
6107c478bd9Sstevel@tonic-gate 		missing++;
6117c478bd9Sstevel@tonic-gate #endif
6127c478bd9Sstevel@tonic-gate 	}
6137c478bd9Sstevel@tonic-gate 
6147c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 93) > 0) {
6157c478bd9Sstevel@tonic-gate 		/*
6167c478bd9Sstevel@tonic-gate 		 * RSM Auto-Halt Restart Returns to Incorrect RIP
6177c478bd9Sstevel@tonic-gate 		 */
6187c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_93)
6197c478bd9Sstevel@tonic-gate 		/*
6207c478bd9Sstevel@tonic-gate 		 * fix is in trap.c
6217c478bd9Sstevel@tonic-gate 		 */
6227c478bd9Sstevel@tonic-gate 		opteron_erratum_93++;
6237c478bd9Sstevel@tonic-gate #else
6247c478bd9Sstevel@tonic-gate 		WARNING(cpu, 93);
6257c478bd9Sstevel@tonic-gate 		missing++;
6267c478bd9Sstevel@tonic-gate #endif
6277c478bd9Sstevel@tonic-gate 	}
6287c478bd9Sstevel@tonic-gate 
6297c478bd9Sstevel@tonic-gate 	/*LINTED*/
6307c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 95) > 0) {
6317c478bd9Sstevel@tonic-gate 		/*
6327c478bd9Sstevel@tonic-gate 		 * RET Instruction May Return to Incorrect EIP
6337c478bd9Sstevel@tonic-gate 		 */
6347c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_95)
6357c478bd9Sstevel@tonic-gate #if defined(_LP64)
6367c478bd9Sstevel@tonic-gate 		/*
6377c478bd9Sstevel@tonic-gate 		 * Workaround this by ensuring that 32-bit user code and
6387c478bd9Sstevel@tonic-gate 		 * 64-bit kernel code never occupy the same address
6397c478bd9Sstevel@tonic-gate 		 * range mod 4G.
6407c478bd9Sstevel@tonic-gate 		 */
6417c478bd9Sstevel@tonic-gate 		if (_userlimit32 > 0xc0000000ul)
6427c478bd9Sstevel@tonic-gate 			*(uintptr_t *)&_userlimit32 = 0xc0000000ul;
6437c478bd9Sstevel@tonic-gate 
6447c478bd9Sstevel@tonic-gate 		/*LINTED*/
6457c478bd9Sstevel@tonic-gate 		ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
6467c478bd9Sstevel@tonic-gate #endif	/* _LP64 */
6477c478bd9Sstevel@tonic-gate #else
6487c478bd9Sstevel@tonic-gate 		WARNING(cpu, 95);
6497c478bd9Sstevel@tonic-gate 		missing++;
6507c478bd9Sstevel@tonic-gate #endif	/* OPTERON_ERRATUM_95 */
6517c478bd9Sstevel@tonic-gate 	}
6527c478bd9Sstevel@tonic-gate 
6537c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 100) > 0) {
6547c478bd9Sstevel@tonic-gate 		/*
6557c478bd9Sstevel@tonic-gate 		 * Compatibility Mode Branches Transfer to Illegal Address
6567c478bd9Sstevel@tonic-gate 		 */
6577c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_100)
6587c478bd9Sstevel@tonic-gate 		/*
6597c478bd9Sstevel@tonic-gate 		 * fix is in trap.c
6607c478bd9Sstevel@tonic-gate 		 */
6617c478bd9Sstevel@tonic-gate 		opteron_erratum_100++;
6627c478bd9Sstevel@tonic-gate #else
6637c478bd9Sstevel@tonic-gate 		WARNING(cpu, 100);
6647c478bd9Sstevel@tonic-gate 		missing++;
6657c478bd9Sstevel@tonic-gate #endif
6667c478bd9Sstevel@tonic-gate 	}
6677c478bd9Sstevel@tonic-gate 
6687c478bd9Sstevel@tonic-gate 	/*LINTED*/
6697c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 108) > 0) {
6707c478bd9Sstevel@tonic-gate 		/*
6717c478bd9Sstevel@tonic-gate 		 * CPUID Instruction May Return Incorrect Model Number In
6727c478bd9Sstevel@tonic-gate 		 * Some Processors
6737c478bd9Sstevel@tonic-gate 		 */
6747c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_108)
6757c478bd9Sstevel@tonic-gate 		/*
6767c478bd9Sstevel@tonic-gate 		 * (Our cpuid-handling code corrects the model number on
6777c478bd9Sstevel@tonic-gate 		 * those processors)
6787c478bd9Sstevel@tonic-gate 		 */
6797c478bd9Sstevel@tonic-gate #else
6807c478bd9Sstevel@tonic-gate 		WARNING(cpu, 108);
6817c478bd9Sstevel@tonic-gate 		missing++;
6827c478bd9Sstevel@tonic-gate #endif
6837c478bd9Sstevel@tonic-gate 	}
6847c478bd9Sstevel@tonic-gate 
6857c478bd9Sstevel@tonic-gate 	/*LINTED*/
6867c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 109) > 0) {
6877c478bd9Sstevel@tonic-gate 		/*
6887c478bd9Sstevel@tonic-gate 		 * Certain Reverse REP MOVS May Produce Unpredictable Behaviour
6897c478bd9Sstevel@tonic-gate 		 */
6907c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109)
6917c478bd9Sstevel@tonic-gate 
6927c478bd9Sstevel@tonic-gate 		/* workaround is to print a warning to upgrade BIOS */
6930ac7d7d8Skucharsk 		if (rdmsr(MSR_AMD_PATCHLEVEL) == 0)
6947c478bd9Sstevel@tonic-gate 			opteron_erratum_109++;
6957c478bd9Sstevel@tonic-gate #else
6967c478bd9Sstevel@tonic-gate 		WARNING(cpu, 109);
6977c478bd9Sstevel@tonic-gate 		missing++;
6987c478bd9Sstevel@tonic-gate #endif
6997c478bd9Sstevel@tonic-gate 	}
7007c478bd9Sstevel@tonic-gate 	/*LINTED*/
7017c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 121) > 0) {
7027c478bd9Sstevel@tonic-gate 		/*
7037c478bd9Sstevel@tonic-gate 		 * Sequential Execution Across Non-Canonical Boundary Causes
7047c478bd9Sstevel@tonic-gate 		 * Processor Hang
7057c478bd9Sstevel@tonic-gate 		 */
7067c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_121)
7077c478bd9Sstevel@tonic-gate 		static int	lma;
7087c478bd9Sstevel@tonic-gate 
7097c478bd9Sstevel@tonic-gate 		if (opteron_erratum_121)
7107c478bd9Sstevel@tonic-gate 			opteron_erratum_121++;
7117c478bd9Sstevel@tonic-gate 
7127c478bd9Sstevel@tonic-gate 		/*
7137c478bd9Sstevel@tonic-gate 		 * Erratum 121 is only present in long (64 bit) mode.
7147c478bd9Sstevel@tonic-gate 		 * Workaround is to include the page immediately before the
7157c478bd9Sstevel@tonic-gate 		 * va hole to eliminate the possibility of system hangs due to
7167c478bd9Sstevel@tonic-gate 		 * sequential execution across the va hole boundary.
7177c478bd9Sstevel@tonic-gate 		 */
7187c478bd9Sstevel@tonic-gate 		if (lma == 0) {
7197c478bd9Sstevel@tonic-gate 			/*
7207c478bd9Sstevel@tonic-gate 			 * check LMA once: assume all cpus are in long mode
7217c478bd9Sstevel@tonic-gate 			 * or not.
7227c478bd9Sstevel@tonic-gate 			 */
7237c478bd9Sstevel@tonic-gate 			lma = 1;
7247c478bd9Sstevel@tonic-gate 
7250ac7d7d8Skucharsk 			if (rdmsr(MSR_AMD_EFER) & AMD_EFER_LMA) {
7267c478bd9Sstevel@tonic-gate 				if (hole_start) {
7277c478bd9Sstevel@tonic-gate 					hole_start -= PAGESIZE;
7287c478bd9Sstevel@tonic-gate 				} else {
7297c478bd9Sstevel@tonic-gate 					/*
7307c478bd9Sstevel@tonic-gate 					 * hole_start not yet initialized by
7317c478bd9Sstevel@tonic-gate 					 * mmu_init. Initialize hole_start
7327c478bd9Sstevel@tonic-gate 					 * with value to be subtracted.
7337c478bd9Sstevel@tonic-gate 					 */
7347c478bd9Sstevel@tonic-gate 					hole_start = PAGESIZE;
7357c478bd9Sstevel@tonic-gate 				}
7367c478bd9Sstevel@tonic-gate 				opteron_erratum_121++;
7377c478bd9Sstevel@tonic-gate 			}
7387c478bd9Sstevel@tonic-gate 		}
7397c478bd9Sstevel@tonic-gate #else
7407c478bd9Sstevel@tonic-gate 		WARNING(cpu, 121);
7417c478bd9Sstevel@tonic-gate 		missing++;
7427c478bd9Sstevel@tonic-gate #endif
7437c478bd9Sstevel@tonic-gate 	}
7447c478bd9Sstevel@tonic-gate 
7457c478bd9Sstevel@tonic-gate 	/*LINTED*/
7467c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 122) > 0) {
7477c478bd9Sstevel@tonic-gate 		/*
7487c478bd9Sstevel@tonic-gate 		 * TLB Flush Filter May Cause Coherency Problem in
7497c478bd9Sstevel@tonic-gate 		 * Multiprocessor Systems
7507c478bd9Sstevel@tonic-gate 		 */
7517c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_122)
7527c478bd9Sstevel@tonic-gate 		/*
7537c478bd9Sstevel@tonic-gate 		 * Erratum 122 is only present in MP configurations (multi-core
7547c478bd9Sstevel@tonic-gate 		 * or multi-processor).
7557c478bd9Sstevel@tonic-gate 		 */
7567c478bd9Sstevel@tonic-gate 
7577c478bd9Sstevel@tonic-gate 		if (opteron_erratum_122 || lgrp_plat_node_cnt > 1 ||
7587c478bd9Sstevel@tonic-gate 		    cpuid_get_ncpu_per_chip(cpu) > 1) {
7597c478bd9Sstevel@tonic-gate 			/* disable TLB Flush Filter */
7600ac7d7d8Skucharsk 			wrmsr(MSR_AMD_HWCR, rdmsr(MSR_AMD_HWCR) |
7610ac7d7d8Skucharsk 			    (uint64_t)(uintptr_t)AMD_HWCR_FFDIS);
7627c478bd9Sstevel@tonic-gate 			opteron_erratum_122++;
7637c478bd9Sstevel@tonic-gate 		}
7647c478bd9Sstevel@tonic-gate 
7657c478bd9Sstevel@tonic-gate #else
7667c478bd9Sstevel@tonic-gate 		WARNING(cpu, 122);
7677c478bd9Sstevel@tonic-gate 		missing++;
7687c478bd9Sstevel@tonic-gate #endif
7697c478bd9Sstevel@tonic-gate 	}
770403c216aSkchow 
771403c216aSkchow #if defined(OPTERON_ERRATUM_123)
7727c478bd9Sstevel@tonic-gate 	/*LINTED*/
7737c478bd9Sstevel@tonic-gate 	if (cpuid_opteron_erratum(cpu, 123) > 0) {
7747c478bd9Sstevel@tonic-gate 		/*
7757c478bd9Sstevel@tonic-gate 		 * Bypassed Reads May Cause Data Corruption or System Hang in
7767c478bd9Sstevel@tonic-gate 		 * Dual Core Processors
7777c478bd9Sstevel@tonic-gate 		 */
7787c478bd9Sstevel@tonic-gate 		/*
7797c478bd9Sstevel@tonic-gate 		 * Erratum 123 applies only to multi-core cpus.
7807c478bd9Sstevel@tonic-gate 		 */
7817c478bd9Sstevel@tonic-gate 
7827c478bd9Sstevel@tonic-gate 		if (cpuid_get_ncpu_per_chip(cpu) > 1) {
7837c478bd9Sstevel@tonic-gate 			/* workaround is to print a warning to upgrade BIOS */
7840ac7d7d8Skucharsk 			if (rdmsr(MSR_AMD_PATCHLEVEL) == 0)
7857c478bd9Sstevel@tonic-gate 				opteron_erratum_123++;
7867c478bd9Sstevel@tonic-gate 		}
7877c478bd9Sstevel@tonic-gate 	}
788403c216aSkchow #endif
7892201b277Skucharsk 
7902201b277Skucharsk #if defined(OPTERON_ERRATUM_131)
7912201b277Skucharsk 	/*LINTED*/
7922201b277Skucharsk 	if (cpuid_opteron_erratum(cpu, 131) > 0) {
7932201b277Skucharsk 		/*
7942201b277Skucharsk 		 * Multiprocessor Systems with Four or More Cores May Deadlock
7952201b277Skucharsk 		 * Waiting for a Probe Response
7962201b277Skucharsk 		 */
7972201b277Skucharsk 		/*
7982201b277Skucharsk 		 * Erratum 131 applies to any system with four or more cores.
7992201b277Skucharsk 		 */
8002201b277Skucharsk 		if ((opteron_erratum_131 == 0) && ((lgrp_plat_node_cnt *
8012201b277Skucharsk 		    cpuid_get_ncpu_per_chip(cpu)) >= 4)) {
8022201b277Skucharsk 			/*
8032201b277Skucharsk 			 * Workaround is to print a warning to upgrade
8042201b277Skucharsk 			 * the BIOS
8052201b277Skucharsk 			 */
8060ac7d7d8Skucharsk 			if (!(rdmsr(MSR_AMD_NB_CFG) & AMD_NB_CFG_SRQ_HEARTBEAT))
8072201b277Skucharsk 				opteron_erratum_131++;
8082201b277Skucharsk 		}
809ef50d8c0Sesaxe 	}
8102201b277Skucharsk #endif
811ef50d8c0Sesaxe 
812ef50d8c0Sesaxe #if defined(OPTERON_WORKAROUND_6336786)
813ef50d8c0Sesaxe 	/*
814ef50d8c0Sesaxe 	 * This isn't really an erratum, but for convenience the
815ef50d8c0Sesaxe 	 * detection/workaround code lives here and in cpuid_opteron_erratum.
816ef50d8c0Sesaxe 	 */
817ef50d8c0Sesaxe 	if (cpuid_opteron_erratum(cpu, 6336786) > 0) {
818ef50d8c0Sesaxe 		int	node;
819ef50d8c0Sesaxe 		uint8_t data;
820ef50d8c0Sesaxe 
821ef50d8c0Sesaxe 		/*
822ef50d8c0Sesaxe 		 * Disable C1-Clock ramping on multi-core/multi-processor
823ef50d8c0Sesaxe 		 * K8 platforms to guard against TSC drift.
824ef50d8c0Sesaxe 		 */
825ef50d8c0Sesaxe 		if (opteron_workaround_6336786) {
826ef50d8c0Sesaxe 			opteron_workaround_6336786++;
827ef50d8c0Sesaxe 		} else if ((lgrp_plat_node_cnt *
828ef50d8c0Sesaxe 		    cpuid_get_ncpu_per_chip(cpu) >= 2) ||
829ef50d8c0Sesaxe 		    opteron_workaround_6336786_UP) {
830ef50d8c0Sesaxe 			for (node = 0; node < lgrp_plat_node_cnt; node++) {
831ef50d8c0Sesaxe 				/*
832ef50d8c0Sesaxe 				 * Clear PMM7[1:0] (function 3, offset 0x87)
833ef50d8c0Sesaxe 				 * Northbridge device is the node id + 24.
834ef50d8c0Sesaxe 				 */
835ef50d8c0Sesaxe 				data = pci_getb_func(0, node + 24, 3, 0x87);
836ef50d8c0Sesaxe 				data &= 0xFC;
837ef50d8c0Sesaxe 				pci_putb_func(0, node + 24, 3, 0x87, data);
838ef50d8c0Sesaxe 			}
839ef50d8c0Sesaxe 			opteron_workaround_6336786++;
840ef50d8c0Sesaxe 		}
8412201b277Skucharsk 	}
842ef50d8c0Sesaxe #endif
843ee88d2b9Skchow 
844ee88d2b9Skchow #if defined(OPTERON_WORKAROUND_6323525)
845ee88d2b9Skchow 	/*LINTED*/
846ee88d2b9Skchow 	/*
847ee88d2b9Skchow 	 * Mutex primitives don't work as expected.
848ee88d2b9Skchow 	 */
849ee88d2b9Skchow 	if (cpuid_opteron_erratum(cpu, 6323525) > 0) {
850ee88d2b9Skchow 
851ee88d2b9Skchow 		/*
852ee88d2b9Skchow 		 * The problem only occurs with 2 or more cores. If the bit in
853ee88d2b9Skchow 		 * MSR_BU_CFG is set, then it is not applicable. The workaround
854ee88d2b9Skchow 		 * is to patch the semaphore routines with the lfence
855ee88d2b9Skchow 		 * instruction to provide the necessary load memory barrier for
856ee88d2b9Skchow 		 * possible subsequent read-modify-write ops.
857ee88d2b9Skchow 		 *
858ee88d2b9Skchow 		 * It is too early in boot to call the patch routine so
859ee88d2b9Skchow 		 * set erratum variable to be done in startup_end().
860ee88d2b9Skchow 		 */
861ee88d2b9Skchow 		if (opteron_workaround_6323525) {
862ee88d2b9Skchow 			opteron_workaround_6323525++;
863ee88d2b9Skchow 		} else if ((x86_feature & X86_SSE2) && ((lgrp_plat_node_cnt *
864ee88d2b9Skchow 		    cpuid_get_ncpu_per_chip(cpu)) >= 2)) {
865ee88d2b9Skchow 			if ((xrdmsr(MSR_BU_CFG) & 0x02) == 0)
866ee88d2b9Skchow 				opteron_workaround_6323525++;
867ee88d2b9Skchow 		}
868ee88d2b9Skchow 	}
869ee88d2b9Skchow #endif
8707c478bd9Sstevel@tonic-gate 	return (missing);
8717c478bd9Sstevel@tonic-gate }
8727c478bd9Sstevel@tonic-gate 
8737c478bd9Sstevel@tonic-gate void
8747c478bd9Sstevel@tonic-gate workaround_errata_end()
8757c478bd9Sstevel@tonic-gate {
8767c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_109)
8777c478bd9Sstevel@tonic-gate 	if (opteron_erratum_109) {
8782201b277Skucharsk 		cmn_err(CE_WARN,
8792201b277Skucharsk 		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
8802201b277Skucharsk 		    " processor\nerratum 109 was not detected; updating your"
8812201b277Skucharsk 		    " system's BIOS to a version\ncontaining this"
8822201b277Skucharsk 		    " microcode patch is HIGHLY recommended or erroneous"
8832201b277Skucharsk 		    " system\noperation may occur.\n");
8847c478bd9Sstevel@tonic-gate 	}
8852201b277Skucharsk #endif	/* OPTERON_ERRATUM_109 */
8867c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_123)
8877c478bd9Sstevel@tonic-gate 	if (opteron_erratum_123) {
8882201b277Skucharsk 		cmn_err(CE_WARN,
8892201b277Skucharsk 		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
8902201b277Skucharsk 		    " processor\nerratum 123 was not detected; updating your"
8912201b277Skucharsk 		    " system's BIOS to a version\ncontaining this"
8922201b277Skucharsk 		    " microcode patch is HIGHLY recommended or erroneous"
8932201b277Skucharsk 		    " system\noperation may occur.\n");
8947c478bd9Sstevel@tonic-gate 	}
8952201b277Skucharsk #endif	/* OPTERON_ERRATUM_123 */
8962201b277Skucharsk #if defined(OPTERON_ERRATUM_131)
8972201b277Skucharsk 	if (opteron_erratum_131) {
8982201b277Skucharsk 		cmn_err(CE_WARN,
8992201b277Skucharsk 		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
9002201b277Skucharsk 		    " processor\nerratum 131 was not detected; updating your"
9012201b277Skucharsk 		    " system's BIOS to a version\ncontaining this"
9022201b277Skucharsk 		    " microcode patch is HIGHLY recommended or erroneous"
9032201b277Skucharsk 		    " system\noperation may occur.\n");
9042201b277Skucharsk 	}
9052201b277Skucharsk #endif	/* OPTERON_ERRATUM_131 */
9067c478bd9Sstevel@tonic-gate }
9077c478bd9Sstevel@tonic-gate 
9087c478bd9Sstevel@tonic-gate static ushort_t *mp_map_warm_reset_vector();
9097c478bd9Sstevel@tonic-gate static void mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector);
9107c478bd9Sstevel@tonic-gate 
911*41791439Sandrei static cpuset_t procset = 1;
912*41791439Sandrei 
9137c478bd9Sstevel@tonic-gate /*ARGSUSED*/
9147c478bd9Sstevel@tonic-gate void
9157c478bd9Sstevel@tonic-gate start_other_cpus(int cprboot)
9167c478bd9Sstevel@tonic-gate {
917*41791439Sandrei 	unsigned int who;
918*41791439Sandrei 	int skipped = 0;
919d90554ebSdmick 	int cpuid = 0;
9207c478bd9Sstevel@tonic-gate 	int delays = 0;
9217c478bd9Sstevel@tonic-gate 	int started_cpu;
9227c478bd9Sstevel@tonic-gate 	ushort_t *warm_reset_vector = NULL;
9237c478bd9Sstevel@tonic-gate 
9247c478bd9Sstevel@tonic-gate 	/*
9257c478bd9Sstevel@tonic-gate 	 * Initialize our own cpu_info.
9267c478bd9Sstevel@tonic-gate 	 */
9277c478bd9Sstevel@tonic-gate 	init_cpu_info(CPU);
9287c478bd9Sstevel@tonic-gate 
9297c478bd9Sstevel@tonic-gate 	/*
9307c478bd9Sstevel@tonic-gate 	 * Initialize our syscall handlers
9317c478bd9Sstevel@tonic-gate 	 */
9327c478bd9Sstevel@tonic-gate 	init_cpu_syscall(CPU);
9337c478bd9Sstevel@tonic-gate 
9347c478bd9Sstevel@tonic-gate 	/*
9357c478bd9Sstevel@tonic-gate 	 * If there is only 1 cpu, or MP is not being used, skip the rest of this.
9367c478bd9Sstevel@tonic-gate 	 */
937*41791439Sandrei 	if (CPUSET_ISEQUAL(mp_cpus, cpu_ready_set) || use_mp == 0) {
9387c478bd9Sstevel@tonic-gate 		if (use_mp == 0)
9397c478bd9Sstevel@tonic-gate 			cmn_err(CE_CONT, "?***** Not in MP mode\n");
9407c478bd9Sstevel@tonic-gate 		goto done;
9417c478bd9Sstevel@tonic-gate 	}
9427c478bd9Sstevel@tonic-gate 
9437c478bd9Sstevel@tonic-gate 	/*
9447c478bd9Sstevel@tonic-gate 	 * perform such initialization as is needed
9457c478bd9Sstevel@tonic-gate 	 * to be able to take CPUs on- and off-line.
9467c478bd9Sstevel@tonic-gate 	 */
9477c478bd9Sstevel@tonic-gate 	cpu_pause_init();
9487c478bd9Sstevel@tonic-gate 
9497c478bd9Sstevel@tonic-gate 	xc_init();		/* initialize processor crosscalls */
9507c478bd9Sstevel@tonic-gate 
9517c478bd9Sstevel@tonic-gate 	/*
9527c478bd9Sstevel@tonic-gate 	 * Copy the real mode code at "real_mode_start" to the
9537c478bd9Sstevel@tonic-gate 	 * page at rm_platter_va.
9547c478bd9Sstevel@tonic-gate 	 */
9557c478bd9Sstevel@tonic-gate 	warm_reset_vector = mp_map_warm_reset_vector();
9567c478bd9Sstevel@tonic-gate 	if (warm_reset_vector == NULL)
9577c478bd9Sstevel@tonic-gate 		goto done;
9587c478bd9Sstevel@tonic-gate 
9597c478bd9Sstevel@tonic-gate 	bcopy((caddr_t)real_mode_start,
9607c478bd9Sstevel@tonic-gate 	    (caddr_t)((rm_platter_t *)rm_platter_va)->rm_code,
9617c478bd9Sstevel@tonic-gate 	    (size_t)real_mode_end - (size_t)real_mode_start);
9627c478bd9Sstevel@tonic-gate 
9637c478bd9Sstevel@tonic-gate 	flushes_require_xcalls = 1;
9647c478bd9Sstevel@tonic-gate 
9657c478bd9Sstevel@tonic-gate 	affinity_set(CPU_CURRENT);
9667c478bd9Sstevel@tonic-gate 
9677c478bd9Sstevel@tonic-gate 	for (who = 0; who < NCPU; who++) {
9687c478bd9Sstevel@tonic-gate 		if (who == cpuid)
9697c478bd9Sstevel@tonic-gate 			continue;
970*41791439Sandrei 		if (!CPU_IN_SET(mp_cpus, who))
971*41791439Sandrei 			continue;
9727c478bd9Sstevel@tonic-gate 
973*41791439Sandrei 		if (ncpus >= max_ncpus) {
974*41791439Sandrei 			skipped = who;
9757c478bd9Sstevel@tonic-gate 			continue;
976*41791439Sandrei 		}
9777c478bd9Sstevel@tonic-gate 
9787c478bd9Sstevel@tonic-gate 		mp_startup_init(who);
9797c478bd9Sstevel@tonic-gate 		started_cpu = 1;
9807c478bd9Sstevel@tonic-gate 		(*cpu_startf)(who, rm_platter_pa);
9817c478bd9Sstevel@tonic-gate 
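		/*
		 * Wait for the new cpu to add itself to procset.  Each pass
		 * through the loop sleeps one clock tick, so the 20 * hz
		 * bound gives the cpu roughly 20 seconds to come up before
		 * it is torn back down.
		 */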
982*41791439Sandrei 		while (!CPU_IN_SET(procset, who)) {
9837c478bd9Sstevel@tonic-gate 
9847c478bd9Sstevel@tonic-gate 			delay(1);
9857c478bd9Sstevel@tonic-gate 			if (++delays > (20 * hz)) {
9867c478bd9Sstevel@tonic-gate 
9877c478bd9Sstevel@tonic-gate 				cmn_err(CE_WARN,
9887c478bd9Sstevel@tonic-gate 				    "cpu%d failed to start", who);
9897c478bd9Sstevel@tonic-gate 
9907c478bd9Sstevel@tonic-gate 				mutex_enter(&cpu_lock);
9917c478bd9Sstevel@tonic-gate 				cpu[who]->cpu_flags = 0;
992affbd3ccSkchow 				cpu_vm_data_destroy(cpu[who]);
9937c478bd9Sstevel@tonic-gate 				cpu_del_unit(who);
9947c478bd9Sstevel@tonic-gate 				mutex_exit(&cpu_lock);
9957c478bd9Sstevel@tonic-gate 
9967c478bd9Sstevel@tonic-gate 				started_cpu = 0;
9977c478bd9Sstevel@tonic-gate 				break;
9987c478bd9Sstevel@tonic-gate 			}
9997c478bd9Sstevel@tonic-gate 		}
10007c478bd9Sstevel@tonic-gate 		if (!started_cpu)
10017c478bd9Sstevel@tonic-gate 			continue;
10027c478bd9Sstevel@tonic-gate 		if (tsc_gethrtime_enable)
10037c478bd9Sstevel@tonic-gate 			tsc_sync_master(who);
10047c478bd9Sstevel@tonic-gate 
10057c478bd9Sstevel@tonic-gate 		if (dtrace_cpu_init != NULL) {
10067c478bd9Sstevel@tonic-gate 			/*
10077c478bd9Sstevel@tonic-gate 			 * DTrace CPU initialization expects cpu_lock
10087c478bd9Sstevel@tonic-gate 			 * to be held.
10097c478bd9Sstevel@tonic-gate 			 */
10107c478bd9Sstevel@tonic-gate 			mutex_enter(&cpu_lock);
10117c478bd9Sstevel@tonic-gate 			(*dtrace_cpu_init)(who);
10127c478bd9Sstevel@tonic-gate 			mutex_exit(&cpu_lock);
10137c478bd9Sstevel@tonic-gate 		}
10147c478bd9Sstevel@tonic-gate 	}
10157c478bd9Sstevel@tonic-gate 
10167c478bd9Sstevel@tonic-gate 	affinity_clear();
10177c478bd9Sstevel@tonic-gate 
10187c478bd9Sstevel@tonic-gate 	for (who = 0; who < NCPU; who++) {
10197c478bd9Sstevel@tonic-gate 		if (who == cpuid)
10207c478bd9Sstevel@tonic-gate 			continue;
10217c478bd9Sstevel@tonic-gate 
1022*41791439Sandrei 		if (!CPU_IN_SET(procset, who))
10237c478bd9Sstevel@tonic-gate 			continue;
10247c478bd9Sstevel@tonic-gate 
1025*41791439Sandrei 		while (!CPU_IN_SET(cpu_ready_set, who))
10267c478bd9Sstevel@tonic-gate 			delay(1);
10277c478bd9Sstevel@tonic-gate 	}
10287c478bd9Sstevel@tonic-gate 
1029*41791439Sandrei 	if (skipped) {
1030*41791439Sandrei 		cmn_err(CE_NOTE,
1031*41791439Sandrei 		    "System detected %d CPU(s), but "
1032*41791439Sandrei 		    "only %d CPU(s) were enabled during boot.",
1033*41791439Sandrei 		    skipped + 1, ncpus);
1034*41791439Sandrei 		cmn_err(CE_NOTE,
1035*41791439Sandrei 		    "Use \"boot-ncpus\" parameter to enable more CPU(s). "
1036*41791439Sandrei 		    "See eeprom(1M).");
1037*41791439Sandrei 	}
1038*41791439Sandrei 
10397c478bd9Sstevel@tonic-gate done:
10407c478bd9Sstevel@tonic-gate 	workaround_errata_end();
10417c478bd9Sstevel@tonic-gate 
10427c478bd9Sstevel@tonic-gate 	if (warm_reset_vector != NULL)
10437c478bd9Sstevel@tonic-gate 		mp_unmap_warm_reset_vector(warm_reset_vector);
10447c478bd9Sstevel@tonic-gate 	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
10457c478bd9Sstevel@tonic-gate 	    HAT_UNLOAD);
10463ad553a7Sgavinm 
10473ad553a7Sgavinm 	cmi_post_mpstartup();
10487c478bd9Sstevel@tonic-gate }
10497c478bd9Sstevel@tonic-gate 
10507c478bd9Sstevel@tonic-gate /*
10517c478bd9Sstevel@tonic-gate  * Dummy functions - no i86pc platforms support dynamic cpu allocation.
10527c478bd9Sstevel@tonic-gate  */
10537c478bd9Sstevel@tonic-gate /*ARGSUSED*/
10547c478bd9Sstevel@tonic-gate int
10557c478bd9Sstevel@tonic-gate mp_cpu_configure(int cpuid)
10567c478bd9Sstevel@tonic-gate {
10577c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
10587c478bd9Sstevel@tonic-gate }
10597c478bd9Sstevel@tonic-gate 
10607c478bd9Sstevel@tonic-gate /*ARGSUSED*/
10617c478bd9Sstevel@tonic-gate int
10627c478bd9Sstevel@tonic-gate mp_cpu_unconfigure(int cpuid)
10637c478bd9Sstevel@tonic-gate {
10647c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
10657c478bd9Sstevel@tonic-gate }
10667c478bd9Sstevel@tonic-gate 
10677c478bd9Sstevel@tonic-gate /*
10687c478bd9Sstevel@tonic-gate  * Startup function for 'other' CPUs (besides boot cpu).
1069da43ceabSsethg  * Called from real_mode_start (after *ap_mlsetup).
1070b4b46911Skchow  *
1071b4b46911Skchow  * WARNING: until CPU_READY is set, mp_startup and routines called by
1072b4b46911Skchow  * mp_startup should not call routines (e.g. kmem_free) that could call
1073b4b46911Skchow  * hat_unload which requires CPU_READY to be set.
10747c478bd9Sstevel@tonic-gate  */
10757c478bd9Sstevel@tonic-gate void
10767c478bd9Sstevel@tonic-gate mp_startup(void)
10777c478bd9Sstevel@tonic-gate {
10787c478bd9Sstevel@tonic-gate 	struct cpu *cp = CPU;
10797c478bd9Sstevel@tonic-gate 	uint_t new_x86_feature;
10807c478bd9Sstevel@tonic-gate 
10817c478bd9Sstevel@tonic-gate 	new_x86_feature = cpuid_pass1(cp);
10827c478bd9Sstevel@tonic-gate 
10837c478bd9Sstevel@tonic-gate 	/*
10847c478bd9Sstevel@tonic-gate 	 * We need to sync MTRR with cpu0's MTRR. We have to do
10857c478bd9Sstevel@tonic-gate 	 * this with interrupts disabled.
10867c478bd9Sstevel@tonic-gate 	 */
10877c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_MTRR)
10887c478bd9Sstevel@tonic-gate 		mtrr_sync();
10897c478bd9Sstevel@tonic-gate 
10907c478bd9Sstevel@tonic-gate 	/*
10917c478bd9Sstevel@tonic-gate 	 * Initialize this CPU's syscall handlers
10927c478bd9Sstevel@tonic-gate 	 */
10937c478bd9Sstevel@tonic-gate 	init_cpu_syscall(cp);
10947c478bd9Sstevel@tonic-gate 
10957c478bd9Sstevel@tonic-gate 	/*
10967c478bd9Sstevel@tonic-gate 	 * Enable interrupts with spl set to LOCK_LEVEL. LOCK_LEVEL is the
10977c478bd9Sstevel@tonic-gate 	 * highest level at which a routine is permitted to block on
10987c478bd9Sstevel@tonic-gate 	 * an adaptive mutex (allows for cpu poke interrupt in case
10997c478bd9Sstevel@tonic-gate 	 * the cpu is blocked on a mutex and halts). Setting LOCK_LEVEL blocks
11007c478bd9Sstevel@tonic-gate 	 * device interrupts that may end up in the hat layer issuing cross
11017c478bd9Sstevel@tonic-gate 	 * calls before CPU_READY is set.
11027c478bd9Sstevel@tonic-gate 	 */
11037c478bd9Sstevel@tonic-gate 	(void) splx(ipltospl(LOCK_LEVEL));
11047c478bd9Sstevel@tonic-gate 
11057c478bd9Sstevel@tonic-gate 	/*
11067c478bd9Sstevel@tonic-gate 	 * Do a sanity check to make sure this new CPU is a sane thing
11077c478bd9Sstevel@tonic-gate 	 * to add to the collection of processors running this system.
11087c478bd9Sstevel@tonic-gate 	 *
11097c478bd9Sstevel@tonic-gate 	 * XXX	Clearly this needs to get more sophisticated, if x86
11107c478bd9Sstevel@tonic-gate 	 * systems start to get built out of heterogeneous CPUs, as is
11117c478bd9Sstevel@tonic-gate 	 * likely to happen once the number of processors in a configuration
11127c478bd9Sstevel@tonic-gate 	 * gets large enough.
11137c478bd9Sstevel@tonic-gate 	 */
11147c478bd9Sstevel@tonic-gate 	if ((x86_feature & new_x86_feature) != x86_feature) {
11157c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT, "?cpu%d: %b\n",
11167c478bd9Sstevel@tonic-gate 		    cp->cpu_id, new_x86_feature, FMT_X86_FEATURE);
11177c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
11187c478bd9Sstevel@tonic-gate 	}
11197c478bd9Sstevel@tonic-gate 
11207c478bd9Sstevel@tonic-gate 	/*
11217c478bd9Sstevel@tonic-gate 	 * We could be more sophisticated here, and just mark the CPU
11227c478bd9Sstevel@tonic-gate 	 * as "faulted" but at this point we'll opt for the easier
11237c478bd9Sstevel@tonic-gate 	 * answer of dying horribly.  Provided the boot cpu is ok,
11247c478bd9Sstevel@tonic-gate 	 * the system can be recovered by booting with use_mp set to zero.
11257c478bd9Sstevel@tonic-gate 	 */
11267c478bd9Sstevel@tonic-gate 	if (workaround_errata(cp) != 0)
11277c478bd9Sstevel@tonic-gate 		panic("critical workaround(s) missing for cpu%d", cp->cpu_id);
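	/*
	 * For illustration, the use_mp recovery mentioned above is usually
	 * done by adding
	 *
	 *	set use_mp = 0
	 *
	 * to /etc/system (or by setting the variable from the kernel
	 * debugger at boot) so that only the boot CPU is started.
	 */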
11287c478bd9Sstevel@tonic-gate 
11297c478bd9Sstevel@tonic-gate 	cpuid_pass2(cp);
11307c478bd9Sstevel@tonic-gate 	cpuid_pass3(cp);
11317c478bd9Sstevel@tonic-gate 	(void) cpuid_pass4(cp);
11327c478bd9Sstevel@tonic-gate 
11337c478bd9Sstevel@tonic-gate 	init_cpu_info(cp);
11347c478bd9Sstevel@tonic-gate 
11357c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
1136*41791439Sandrei 	CPUSET_ADD(procset, cp->cpu_id);
11377c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
11387c478bd9Sstevel@tonic-gate 
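	/*
	 * When TSC-based gethrtime is enabled, rendezvous with the
	 * tsc_sync_master() call that the boot CPU makes for this CPU
	 * (see the startup loop above).
	 */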
11397c478bd9Sstevel@tonic-gate 	if (tsc_gethrtime_enable)
11407c478bd9Sstevel@tonic-gate 		tsc_sync_slave();
11417c478bd9Sstevel@tonic-gate 
11427c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
11437c478bd9Sstevel@tonic-gate 	/*
11447c478bd9Sstevel@tonic-gate 	 * It's unfortunate that chip_cpu_init() has to be called here.
11457c478bd9Sstevel@tonic-gate 	 * It really belongs in cpu_add_unit(), but it is
11467c478bd9Sstevel@tonic-gate 	 * dependent on the cpuid probing, which must be done in the
11477c478bd9Sstevel@tonic-gate 	 * context of the current CPU. Care must be taken on x86 to ensure
11487c478bd9Sstevel@tonic-gate 	 * that mp_startup can safely block even though chip_cpu_init() and
11497c478bd9Sstevel@tonic-gate 	 * cpu_add_active() have not yet been called.
11507c478bd9Sstevel@tonic-gate 	 */
11517c478bd9Sstevel@tonic-gate 	chip_cpu_init(cp);
11527c478bd9Sstevel@tonic-gate 	chip_cpu_startup(cp);
11537c478bd9Sstevel@tonic-gate 
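	/*
	 * Setting CPU_READY here lifts the restriction noted in the block
	 * comment above mp_startup(): from this point it is safe to call
	 * routines (e.g. kmem_free) that may end up in hat_unload().
	 */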
11547c478bd9Sstevel@tonic-gate 	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_ENABLE | CPU_EXISTS;
11557c478bd9Sstevel@tonic-gate 	cpu_add_active(cp);
11567c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
11577c478bd9Sstevel@tonic-gate 
1158b4b46911Skchow 	add_cpunode2devtree(cp->cpu_id, cp->cpu_m.mcpu_cpi);
1159b4b46911Skchow 
1160da43ceabSsethg 	/* The base spl should still be at LOCK_LEVEL here */
1161da43ceabSsethg 	ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
1162da43ceabSsethg 	set_base_spl();		/* Restore the spl to its proper value */
1163da43ceabSsethg 
11647c478bd9Sstevel@tonic-gate 	(void) spl0();				/* enable interrupts */
11657c478bd9Sstevel@tonic-gate 
11667aec1d6eScindi 	/*
11677aec1d6eScindi 	 * Set up the CPU module for this CPU.  This can't be done before
11687aec1d6eScindi 	 * this CPU is made CPU_READY, because we may (in heterogeneous systems)
11697aec1d6eScindi 	 * need to load another CPU module.  The act of attempting to load
11707aec1d6eScindi 	 * a module may trigger a cross-call, which will ASSERT unless this
11717aec1d6eScindi 	 * cpu is CPU_READY.
11727aec1d6eScindi 	 */
11737aec1d6eScindi 	cmi_init();
11747aec1d6eScindi 
11757aec1d6eScindi 	if (x86_feature & X86_MCA)
11767aec1d6eScindi 		cmi_mca_init();
11777aec1d6eScindi 
11787c478bd9Sstevel@tonic-gate 	if (boothowto & RB_DEBUG)
11797c478bd9Sstevel@tonic-gate 		kdi_dvec_cpu_init(cp);
11807c478bd9Sstevel@tonic-gate 
11817c478bd9Sstevel@tonic-gate 	/*
11827c478bd9Sstevel@tonic-gate 	 * Setting the bit in cpu_ready_set must be the last operation in
11837c478bd9Sstevel@tonic-gate 	 * processor initialization; the boot CPU will continue to boot once
11847c478bd9Sstevel@tonic-gate 	 * it sees this bit set for all active CPUs.
11857c478bd9Sstevel@tonic-gate 	 */
11867c478bd9Sstevel@tonic-gate 	CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);
11877c478bd9Sstevel@tonic-gate 
11887c478bd9Sstevel@tonic-gate 	/*
11897c478bd9Sstevel@tonic-gate 	 * Because mp_startup() gets fired off after init() starts, we
11907c478bd9Sstevel@tonic-gate 	 * can't use the '?' trick to do 'boot -v' printing - so we
11917c478bd9Sstevel@tonic-gate 	 * always direct the 'cpu .. online' messages to the log.
11927c478bd9Sstevel@tonic-gate 	 */
11937c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
11947c478bd9Sstevel@tonic-gate 	    cp->cpu_id);
11957c478bd9Sstevel@tonic-gate 
11967c478bd9Sstevel@tonic-gate 	/*
11977c478bd9Sstevel@tonic-gate 	 * Now we are done with the startup thread, so free it up.
11987c478bd9Sstevel@tonic-gate 	 */
11997c478bd9Sstevel@tonic-gate 	thread_exit();
12007c478bd9Sstevel@tonic-gate 	panic("mp_startup: cannot return");
12017c478bd9Sstevel@tonic-gate 	/*NOTREACHED*/
12027c478bd9Sstevel@tonic-gate }
12037c478bd9Sstevel@tonic-gate 
12047c478bd9Sstevel@tonic-gate 
12057c478bd9Sstevel@tonic-gate /*
12067c478bd9Sstevel@tonic-gate  * Start CPU on user request.
12077c478bd9Sstevel@tonic-gate  */
12087c478bd9Sstevel@tonic-gate /* ARGSUSED */
12097c478bd9Sstevel@tonic-gate int
12107c478bd9Sstevel@tonic-gate mp_cpu_start(struct cpu *cp)
12117c478bd9Sstevel@tonic-gate {
12127c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12137c478bd9Sstevel@tonic-gate 	return (0);
12147c478bd9Sstevel@tonic-gate }
12157c478bd9Sstevel@tonic-gate 
12167c478bd9Sstevel@tonic-gate /*
12177c478bd9Sstevel@tonic-gate  * Stop CPU on user request.
12187c478bd9Sstevel@tonic-gate  */
12197c478bd9Sstevel@tonic-gate /* ARGSUSED */
12207c478bd9Sstevel@tonic-gate int
12217c478bd9Sstevel@tonic-gate mp_cpu_stop(struct cpu *cp)
12227c478bd9Sstevel@tonic-gate {
1223d90554ebSdmick 	extern int cbe_psm_timer_mode;
12247c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1225d90554ebSdmick 
1226d90554ebSdmick 	/*
1227d90554ebSdmick 	 * If TIMER_PERIODIC mode is used, CPU0 is the one running it;
1228d90554ebSdmick 	 * can't stop it.  (This is true only for machines with no TSC.)
1229d90554ebSdmick 	 */
1230d90554ebSdmick 
1231d90554ebSdmick 	if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
1232d90554ebSdmick 		return (1);
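	/*
	 * The non-zero return above simply makes the offline request fail;
	 * the CPU driving the periodic timer keeps running.
	 */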
12337c478bd9Sstevel@tonic-gate 
12347c478bd9Sstevel@tonic-gate 	return (0);
12357c478bd9Sstevel@tonic-gate }
12367c478bd9Sstevel@tonic-gate 
12377c478bd9Sstevel@tonic-gate /*
12387c478bd9Sstevel@tonic-gate  * Power on CPU.
12397c478bd9Sstevel@tonic-gate  */
12407c478bd9Sstevel@tonic-gate /* ARGSUSED */
12417c478bd9Sstevel@tonic-gate int
12427c478bd9Sstevel@tonic-gate mp_cpu_poweron(struct cpu *cp)
12437c478bd9Sstevel@tonic-gate {
12447c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12457c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
12467c478bd9Sstevel@tonic-gate }
12477c478bd9Sstevel@tonic-gate 
12487c478bd9Sstevel@tonic-gate /*
12497c478bd9Sstevel@tonic-gate  * Power off CPU.
12507c478bd9Sstevel@tonic-gate  */
12517c478bd9Sstevel@tonic-gate /* ARGSUSED */
12527c478bd9Sstevel@tonic-gate int
12537c478bd9Sstevel@tonic-gate mp_cpu_poweroff(struct cpu *cp)
12547c478bd9Sstevel@tonic-gate {
12557c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12567c478bd9Sstevel@tonic-gate 	return (ENOTSUP);		/* not supported */
12577c478bd9Sstevel@tonic-gate }
12587c478bd9Sstevel@tonic-gate 
12597c478bd9Sstevel@tonic-gate 
12607c478bd9Sstevel@tonic-gate /*
12617c478bd9Sstevel@tonic-gate  * Take the specified CPU out of participation in interrupts.
12627c478bd9Sstevel@tonic-gate  */
12637c478bd9Sstevel@tonic-gate int
12647c478bd9Sstevel@tonic-gate cpu_disable_intr(struct cpu *cp)
12657c478bd9Sstevel@tonic-gate {
12667c478bd9Sstevel@tonic-gate 	if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
12677c478bd9Sstevel@tonic-gate 		return (EBUSY);
12687c478bd9Sstevel@tonic-gate 
12697c478bd9Sstevel@tonic-gate 	cp->cpu_flags &= ~CPU_ENABLE;
12707c478bd9Sstevel@tonic-gate 	return (0);
12717c478bd9Sstevel@tonic-gate }
12727c478bd9Sstevel@tonic-gate 
12737c478bd9Sstevel@tonic-gate /*
12747c478bd9Sstevel@tonic-gate  * Allow the specified CPU to participate in interrupts.
12757c478bd9Sstevel@tonic-gate  */
12767c478bd9Sstevel@tonic-gate void
12777c478bd9Sstevel@tonic-gate cpu_enable_intr(struct cpu *cp)
12787c478bd9Sstevel@tonic-gate {
12797c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
12807c478bd9Sstevel@tonic-gate 	cp->cpu_flags |= CPU_ENABLE;
12817c478bd9Sstevel@tonic-gate 	psm_enable_intr(cp->cpu_id);
12827c478bd9Sstevel@tonic-gate }
12837c478bd9Sstevel@tonic-gate 
12847c478bd9Sstevel@tonic-gate 
12857c478bd9Sstevel@tonic-gate 
12867c478bd9Sstevel@tonic-gate static ushort_t *
12877c478bd9Sstevel@tonic-gate mp_map_warm_reset_vector()
12887c478bd9Sstevel@tonic-gate {
12897c478bd9Sstevel@tonic-gate 	ushort_t *warm_reset_vector;
12907c478bd9Sstevel@tonic-gate 
12917c478bd9Sstevel@tonic-gate 	if (!(warm_reset_vector = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
12927c478bd9Sstevel@tonic-gate 	    sizeof (ushort_t *), PROT_READ|PROT_WRITE)))
12937c478bd9Sstevel@tonic-gate 		return (NULL);
12947c478bd9Sstevel@tonic-gate 
12957c478bd9Sstevel@tonic-gate 	/*
12967c478bd9Sstevel@tonic-gate 	 * Set up the secondary CPU BIOS boot-up vector.
12977c478bd9Sstevel@tonic-gate 	 */
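	/*
	 * The warm reset vector is a real-mode far pointer: the 16-bit
	 * offset is stored at the lower address and the 16-bit segment
	 * just above it, which is why the offset is written first and the
	 * pointer then incremented before the segment (rm_platter_pa >> 4)
	 * is stored.
	 */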
12987c478bd9Sstevel@tonic-gate 	*warm_reset_vector = (ushort_t)((caddr_t)
12997c478bd9Sstevel@tonic-gate 		((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va
13007c478bd9Sstevel@tonic-gate 		+ ((ulong_t)rm_platter_va & 0xf));
13017c478bd9Sstevel@tonic-gate 	warm_reset_vector++;
13027c478bd9Sstevel@tonic-gate 	*warm_reset_vector = (ushort_t)(rm_platter_pa >> 4);
13037c478bd9Sstevel@tonic-gate 
13047c478bd9Sstevel@tonic-gate 	--warm_reset_vector;
13057c478bd9Sstevel@tonic-gate 	return (warm_reset_vector);
13067c478bd9Sstevel@tonic-gate }
13077c478bd9Sstevel@tonic-gate 
13087c478bd9Sstevel@tonic-gate static void
13097c478bd9Sstevel@tonic-gate mp_unmap_warm_reset_vector(ushort_t *warm_reset_vector)
13107c478bd9Sstevel@tonic-gate {
13117c478bd9Sstevel@tonic-gate 	psm_unmap_phys((caddr_t)warm_reset_vector, sizeof (ushort_t *));
13127c478bd9Sstevel@tonic-gate }
13137c478bd9Sstevel@tonic-gate 
13147c478bd9Sstevel@tonic-gate void
13157c478bd9Sstevel@tonic-gate mp_cpu_faulted_enter(struct cpu *cp)
13167aec1d6eScindi {
13177aec1d6eScindi 	cmi_faulted_enter(cp);
13187aec1d6eScindi }
13197c478bd9Sstevel@tonic-gate 
13207c478bd9Sstevel@tonic-gate void
13217c478bd9Sstevel@tonic-gate mp_cpu_faulted_exit(struct cpu *cp)
13227aec1d6eScindi {
13237aec1d6eScindi 	cmi_faulted_exit(cp);
13247aec1d6eScindi }
13257c478bd9Sstevel@tonic-gate 
13267c478bd9Sstevel@tonic-gate /*
13277c478bd9Sstevel@tonic-gate  * The following two routines are used as context operators on threads belonging
13287c478bd9Sstevel@tonic-gate  * to processes with a private LDT (see sysi86).  Due to the rarity of such
13297c478bd9Sstevel@tonic-gate  * processes, these routines are currently written for best code readability and
13307c478bd9Sstevel@tonic-gate  * organization rather than speed.  We could avoid checking x86_feature at every
13317c478bd9Sstevel@tonic-gate  * context switch by installing different context ops, depending on the
13327c478bd9Sstevel@tonic-gate  * x86_feature flags, at LDT creation time -- one for each combination of fast
13337c478bd9Sstevel@tonic-gate  * syscall feature flags.
13347c478bd9Sstevel@tonic-gate  */
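/*
 * A minimal sketch of that alternative (hypothetical, and collapsing the
 * feature-flag combinations into a single test for brevity):
 *
 *	save_op = (x86_feature & (X86_SEP | X86_ASYSC)) ?
 *	    cpu_fast_syscall_disable : NULL;
 *	restore_op = (x86_feature & (X86_SEP | X86_ASYSC)) ?
 *	    cpu_fast_syscall_enable : NULL;
 *	installctx(curthread, NULL, save_op, restore_op,
 *	    NULL, NULL, NULL, NULL);
 *
 * would be done once when the private LDT is created, so the per-switch
 * x86_feature tests below would disappear.  The installctx() usage shown
 * is illustrative only; the real LDT setup lives in the sysi86 code, not
 * in this file.
 */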
13357c478bd9Sstevel@tonic-gate 
13367c478bd9Sstevel@tonic-gate /*ARGSUSED*/
13377c478bd9Sstevel@tonic-gate void
13387c478bd9Sstevel@tonic-gate cpu_fast_syscall_disable(void *arg)
13397c478bd9Sstevel@tonic-gate {
13407c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_SEP)
13417c478bd9Sstevel@tonic-gate 		cpu_sep_disable();
13427c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_ASYSC)
13437c478bd9Sstevel@tonic-gate 		cpu_asysc_disable();
13447c478bd9Sstevel@tonic-gate }
13457c478bd9Sstevel@tonic-gate 
13467c478bd9Sstevel@tonic-gate /*ARGSUSED*/
13477c478bd9Sstevel@tonic-gate void
13487c478bd9Sstevel@tonic-gate cpu_fast_syscall_enable(void *arg)
13497c478bd9Sstevel@tonic-gate {
13507c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_SEP)
13517c478bd9Sstevel@tonic-gate 		cpu_sep_enable();
13527c478bd9Sstevel@tonic-gate 	if (x86_feature & X86_ASYSC)
13537c478bd9Sstevel@tonic-gate 		cpu_asysc_enable();
13547c478bd9Sstevel@tonic-gate }
13557c478bd9Sstevel@tonic-gate 
13567c478bd9Sstevel@tonic-gate static void
13577c478bd9Sstevel@tonic-gate cpu_sep_enable(void)
13587c478bd9Sstevel@tonic-gate {
13597c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_SEP);
13607c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
13617c478bd9Sstevel@tonic-gate 
13620ac7d7d8Skucharsk 	wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
13637c478bd9Sstevel@tonic-gate }
13647c478bd9Sstevel@tonic-gate 
13657c478bd9Sstevel@tonic-gate static void
13667c478bd9Sstevel@tonic-gate cpu_sep_disable(void)
13677c478bd9Sstevel@tonic-gate {
13687c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_SEP);
13697c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
13707c478bd9Sstevel@tonic-gate 
13717c478bd9Sstevel@tonic-gate 	/*
13727c478bd9Sstevel@tonic-gate 	 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
13737c478bd9Sstevel@tonic-gate 	 * the sysenter or sysexit instruction to trigger a #gp fault.
13747c478bd9Sstevel@tonic-gate 	 */
13750ac7d7d8Skucharsk 	wrmsr(MSR_INTC_SEP_CS, 0ULL);
13767c478bd9Sstevel@tonic-gate }
13777c478bd9Sstevel@tonic-gate 
13787c478bd9Sstevel@tonic-gate static void
13797c478bd9Sstevel@tonic-gate cpu_asysc_enable(void)
13807c478bd9Sstevel@tonic-gate {
13817c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_ASYSC);
13827c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
13837c478bd9Sstevel@tonic-gate 
13840ac7d7d8Skucharsk 	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
13850ac7d7d8Skucharsk 	    (uint64_t)(uintptr_t)AMD_EFER_SCE);
13867c478bd9Sstevel@tonic-gate }
13877c478bd9Sstevel@tonic-gate 
13887c478bd9Sstevel@tonic-gate static void
13897c478bd9Sstevel@tonic-gate cpu_asysc_disable(void)
13907c478bd9Sstevel@tonic-gate {
13917c478bd9Sstevel@tonic-gate 	ASSERT(x86_feature & X86_ASYSC);
13927c478bd9Sstevel@tonic-gate 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
13937c478bd9Sstevel@tonic-gate 
13947c478bd9Sstevel@tonic-gate 	/*
13957c478bd9Sstevel@tonic-gate 	 * Turn off the SCE (syscall enable) bit in the EFER register. Software
13967c478bd9Sstevel@tonic-gate 	 * executing syscall or sysret with this bit off will incur a #ud trap.
13977c478bd9Sstevel@tonic-gate 	 */
13980ac7d7d8Skucharsk 	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) &
13990ac7d7d8Skucharsk 	    ~((uint64_t)(uintptr_t)AMD_EFER_SCE));
14007c478bd9Sstevel@tonic-gate }
1401