xref: /illumos-gate/usr/src/uts/sun4u/os/mach_startup.c (revision 56f33205)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5c56c1e58Sgirish  * Common Development and Distribution License (the "License").
6c56c1e58Sgirish  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22*56f33205SJonathan Adams  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
277c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
287c478bd9Sstevel@tonic-gate #include <sys/vm.h>
297c478bd9Sstevel@tonic-gate #include <sys/cpu.h>
3025cf1a30Sjl #include <sys/cpupart.h>
316890d023SEric Saxe #include <sys/cmt.h>
326890d023SEric Saxe #include <sys/bitset.h>
337c478bd9Sstevel@tonic-gate #include <sys/reboot.h>
347c478bd9Sstevel@tonic-gate #include <sys/kdi.h>
357c478bd9Sstevel@tonic-gate #include <sys/bootconf.h>
367c478bd9Sstevel@tonic-gate #include <sys/memlist_plat.h>
377c478bd9Sstevel@tonic-gate #include <sys/memlist_impl.h>
387c478bd9Sstevel@tonic-gate #include <sys/prom_plat.h>
397c478bd9Sstevel@tonic-gate #include <sys/prom_isa.h>
407c478bd9Sstevel@tonic-gate #include <sys/autoconf.h>
417c478bd9Sstevel@tonic-gate #include <sys/intreg.h>
427c478bd9Sstevel@tonic-gate #include <sys/ivintr.h>
437c478bd9Sstevel@tonic-gate #include <sys/fpu/fpusystm.h>
447c478bd9Sstevel@tonic-gate #include <sys/iommutsb.h>
457c478bd9Sstevel@tonic-gate #include <vm/vm_dep.h>
467c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
477c478bd9Sstevel@tonic-gate #include <vm/seg_kpm.h>
487c478bd9Sstevel@tonic-gate #include <vm/seg_map.h>
497c478bd9Sstevel@tonic-gate #include <vm/seg_kp.h>
507c478bd9Sstevel@tonic-gate #include <sys/sysconf.h>
517c478bd9Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
527c478bd9Sstevel@tonic-gate #include <sys/kobj.h>
537c478bd9Sstevel@tonic-gate #include <sys/sun4asi.h>
547c478bd9Sstevel@tonic-gate #include <sys/clconf.h>
557c478bd9Sstevel@tonic-gate #include <sys/platform_module.h>
567c478bd9Sstevel@tonic-gate #include <sys/panic.h>
577c478bd9Sstevel@tonic-gate #include <sys/cpu_sgnblk_defs.h>
587c478bd9Sstevel@tonic-gate #include <sys/clock.h>
597c478bd9Sstevel@tonic-gate #include <sys/fpras_impl.h>
607c478bd9Sstevel@tonic-gate #include <sys/prom_debug.h>
617c478bd9Sstevel@tonic-gate #include <sys/traptrace.h>
627c478bd9Sstevel@tonic-gate #include <sys/memnode.h>
637c478bd9Sstevel@tonic-gate #include <sys/mem_cage.h>
647c478bd9Sstevel@tonic-gate 
/*
 * fpRAS implementation structures.
 *
 * fpras_chkfngrps points at a 64-byte-aligned array of NCPU check
 * function groups (populated in mach_fpras() below); fpras_chkfngrps_base
 * is the raw, possibly unaligned allocation backing it.
 */
struct fpras_chkfn *fpras_chkfnaddrs[FPRAS_NCOPYOPS];
struct fpras_chkfngrp *fpras_chkfngrps;
struct fpras_chkfngrp *fpras_chkfngrps_base;
int fpras_frequency = -1;	/* -1 until defaulted in mach_fpras() */
int64_t fpras_interval = -1;	/* fpras checks start once non-negative */

/*
 * Increase unix symbol table size as a work around for 6828121
 */
int alloc_mem_bermuda_triangle;

/*
 * Halt idling cpus optimization
 *
 * This optimization is only enabled on platforms that have
 * CPU halt support.  The cpu_halt_cpu() support is provided
 * in the cpu module and it is referenced here with a weak pragma,
 * so its presence can be tested for at runtime (see
 * mach_cpu_halt_idle()).  The presence of this routine automatically
 * enables the halt idling cpus functionality if the global switch
 * enable_halt_idle_cpus is set (default is set).
 *
 */
#pragma weak	cpu_halt_cpu
extern void	cpu_halt_cpu();

/*
 * Defines for the idle_state_transition DTrace probe
 *
 * The probe fires when the CPU undergoes an idle state change (e.g. halting)
 * The argument passed is the state to which the CPU is transitioning.
 *
 * The states are defined here.
 */
#define	IDLE_STATE_NORMAL 0
#define	IDLE_STATE_HALTED 1

int		enable_halt_idle_cpus = 1; /* global switch */
10525cf1a30Sjl 
/*
 * Install the kernel's trap table on the current CPU.  The interrupt
 * request free list must be initialized first, since trap handlers may
 * allocate from it as soon as the table is live.
 */
void
setup_trap_table(void)
{
	intr_init(CPU);			/* init interrupt request free list */
	setwstate(WSTATE_KERN);		/* establish kernel window state */
	prom_set_traptable(&trap_table);	/* switch to kernel trap table */
}
1137c478bd9Sstevel@tonic-gate 
/*
 * Set up the fpRAS check-function groups (one group per possible cpu id,
 * 64-byte aligned) and arm the checking interval.  No-op when fpRAS is
 * unimplemented on this cpu type or disabled via fpras_disable.
 */
void
mach_fpras()
{
	if (fpras_implemented && !fpras_disable) {
		int i;
		struct fpras_chkfngrp *fcgp;
		size_t chkfngrpsallocsz;

		/*
		 * Note that we size off of NCPU and setup for
		 * all those possibilities regardless of whether
		 * the cpu id is present or not.  We do this so that
		 * we don't have any construction or destruction
		 * activity to perform at DR time, and it's not
		 * costly in memory.  We require block alignment.
		 */
		chkfngrpsallocsz = NCPU * sizeof (struct fpras_chkfngrp);
		fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz, KM_SLEEP);
		if (IS_P2ALIGNED((uintptr_t)fpras_chkfngrps_base, 64)) {
			fpras_chkfngrps = fpras_chkfngrps_base;
		} else {
			/*
			 * First allocation wasn't 64-byte aligned; redo it
			 * with 64 bytes of slack and round up within it.
			 */
			kmem_free(fpras_chkfngrps_base, chkfngrpsallocsz);
			chkfngrpsallocsz += 64;
			fpras_chkfngrps_base = kmem_alloc(chkfngrpsallocsz,
			    KM_SLEEP);
			fpras_chkfngrps = (struct fpras_chkfngrp *)
			    P2ROUNDUP((uintptr_t)fpras_chkfngrps_base, 64);
		}

		/*
		 * Copy our check function into place for each copy operation
		 * and each cpu id.
		 */
		fcgp = &fpras_chkfngrps[0];
		for (i = 0; i < FPRAS_NCOPYOPS; ++i)
			bcopy((void *)fpras_chkfn_type1, &fcgp->fpras_fn[i],
			    sizeof (struct fpras_chkfn));
		/* Replicate cpu 0's group to all remaining cpu ids. */
		for (i = 1; i < NCPU; ++i)
			*(&fpras_chkfngrps[i]) = *fcgp;

		/*
		 * At definition fpras_frequency is set to -1, and it will
		 * still have that value unless changed in /etc/system (not
		 * strictly supported, but not preventable).  The following
		 * both sets the default and sanity checks anything from
		 * /etc/system.
		 */
		if (fpras_frequency < 0)
			fpras_frequency = FPRAS_DEFAULT_FREQUENCY;

		/*
		 * Now calculate fpras_interval.  When fpras_interval
		 * becomes non-negative fpras checks will commence
		 * (copies before this point in boot will bypass fpras).
		 * Our stores of instructions must be visible; no need
		 * to flush as they're never been executed before.
		 */
		membar_producer();
		fpras_interval = (fpras_frequency == 0) ?
		    0 : sys_tick_freq / fpras_frequency;
	}
}
1767c478bd9Sstevel@tonic-gate 
1777c478bd9Sstevel@tonic-gate void
1787c478bd9Sstevel@tonic-gate mach_hw_copy_limit(void)
1797c478bd9Sstevel@tonic-gate {
1807c478bd9Sstevel@tonic-gate 	if (!fpu_exists) {
1817c478bd9Sstevel@tonic-gate 		use_hw_bcopy = 0;
1827c478bd9Sstevel@tonic-gate 		hw_copy_limit_1 = 0;
1837c478bd9Sstevel@tonic-gate 		hw_copy_limit_2 = 0;
1847c478bd9Sstevel@tonic-gate 		hw_copy_limit_4 = 0;
1857c478bd9Sstevel@tonic-gate 		hw_copy_limit_8 = 0;
1867c478bd9Sstevel@tonic-gate 		use_hw_bzero = 0;
1877c478bd9Sstevel@tonic-gate 	}
1887c478bd9Sstevel@tonic-gate }
1897c478bd9Sstevel@tonic-gate 
1907c478bd9Sstevel@tonic-gate void
1917c478bd9Sstevel@tonic-gate load_tod_module()
1927c478bd9Sstevel@tonic-gate {
1937c478bd9Sstevel@tonic-gate 	/*
1947c478bd9Sstevel@tonic-gate 	 * Load tod driver module for the tod part found on this system.
1957c478bd9Sstevel@tonic-gate 	 * Recompute the cpu frequency/delays based on tod as tod part
1967c478bd9Sstevel@tonic-gate 	 * tends to keep time more accurately.
1977c478bd9Sstevel@tonic-gate 	 */
1987c478bd9Sstevel@tonic-gate 	if (tod_module_name == NULL || modload("tod", tod_module_name) == -1)
1997c478bd9Sstevel@tonic-gate 		halt("Can't load tod module");
2007c478bd9Sstevel@tonic-gate }
2017c478bd9Sstevel@tonic-gate 
/*
 * Start the software memory scrubber.  Skipped entirely on platforms
 * that scrub memory in hardware (_HW_MEMSCRUB_SUPPORT), and skipped
 * when there is no FPU (i.e. running fpu emulation code).
 */
void
mach_memscrub(void)
{
	/*
	 * Startup memory scrubber, if not running fpu emulation code.
	 */

#ifndef _HW_MEMSCRUB_SUPPORT
	if (fpu_exists) {
		/* Failure to start scrubbing is a warning, not fatal. */
		if (memscrub_init()) {
			cmn_err(CE_WARN,
			    "Memory scrubber failed to initialize");
		}
	}
#endif /* _HW_MEMSCRUB_SUPPORT */
}
21825cf1a30Sjl 
/*
 * Halt the present CPU until awoken via an interrupt.
 * This routine should only be invoked if cpu_halt_cpu()
 * exists and is supported, see mach_cpu_halt_idle()
 */
void
cpu_halt(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;
	/* Local dispatch queue length; polled below to cut halt latency. */
	volatile int *p = &cpup->cpu_disp->disp_nrunnable;
	uint_t s;

	/*
	 * If this CPU is online then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitset. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (CPU->cpu_flags & CPU_OFFLINE)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitset
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpu bitset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted bitset, and then check if there
	 * is any work available.  The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork()
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		membar_producer();	/* flag visible before bitset entry */
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitset, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.  Wait until something becomes
	 * runnable locally or we are awaken (i.e. removed from the halt set).
	 * Note that the call to cpu_halt_cpu() can return even if we have
	 * nothing to do.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 * Also, if the offlined CPU has been brought back on-line, then
	 * we return as well.
	 *
	 * The ordering of the poke and the clearing of the bit by cpu_wakeup
	 * is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_halt() must disable interrupts, then check for the bit.
	 *
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 *
	 * Any interrupt will awaken the cpu from halt. Looping here
	 * will filter spurious interrupts that wake us up, but don't
	 * represent a need for us to head back out to idle().  This
	 * will enable the idle loop to be more efficient and sleep in
	 * the processor pipeline for a larger percent of the time,
	 * which returns useful cycles to the peer hardware strand
	 * that shares the pipeline.
	 */
	s = disable_vec_intr();
	while (*p == 0 &&
	    ((hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid)) ||
	    (!hset_update && (CPU->cpu_flags & CPU_OFFLINE)))) {

		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_HALTED);
		(void) cpu_halt_cpu();
		DTRACE_PROBE1(idle__state__transition,
		    uint_t, IDLE_STATE_NORMAL);

		/* Briefly re-enable interrupts so pending ones deliver. */
		enable_vec_intr(s);
		s = disable_vec_intr();
	}

	/*
	 * We're no longer halted
	 */
	enable_vec_intr(s);
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
33625cf1a30Sjl 
/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 * This function should only be invoked if cpu_halt_cpu()
 * exists and is supported, see mach_cpu_halt_idle()
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	processorid_t	cpu_sid;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpu bitset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_halt().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 *
	 * If possible, try to select a CPU close to the target, since this
	 * will likely trigger a migration.
	 */
	do {
		/*
		 * Retry until we successfully claim a halted CPU's bit or
		 * the halt set goes empty; a racing wakeup may have taken
		 * the one bitset_find() handed us.
		 */
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	/* Don't poke ourself; we're clearly already awake. */
	if (cpu_found != CPU->cpu_seqid)
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
}
4037c478bd9Sstevel@tonic-gate 
4047c478bd9Sstevel@tonic-gate void
4050e751525SEric Saxe mach_cpu_halt_idle(void)
4067c478bd9Sstevel@tonic-gate {
40725cf1a30Sjl 	if (enable_halt_idle_cpus) {
40825cf1a30Sjl 		if (&cpu_halt_cpu) {
40925cf1a30Sjl 			idle_cpu = cpu_halt;
41025cf1a30Sjl 			disp_enq_thread = cpu_wakeup;
41125cf1a30Sjl 		}
41225cf1a30Sjl 	}
4137c478bd9Sstevel@tonic-gate }
4147c478bd9Sstevel@tonic-gate 
/*
 * Per-CPU interrupt mondo queue setup; a no-op on sun4u, always succeeds.
 */
/*ARGSUSED*/
int
cpu_intrq_setup(struct cpu *cp)
{
	/* Interrupt mondo queues not applicable to sun4u */
	return (0);
}
4227c478bd9Sstevel@tonic-gate 
/*
 * Per-CPU interrupt mondo queue teardown; a no-op on sun4u.
 */
/*ARGSUSED*/
void
cpu_intrq_cleanup(struct cpu *cp)
{
	/* Interrupt mondo queues not applicable to sun4u */
}
4291ae08745Sheppo 
/*
 * Per-CPU interrupt/error queue registration; a no-op on sun4u.
 */
/*ARGSUSED*/
void
cpu_intrq_register(struct cpu *cp)
{
	/* Interrupt/error queues not applicable to sun4u */
}
4367c478bd9Sstevel@tonic-gate 
/*
 * Hypervisor traptrace buffer setup; sun4u has no hypervisor, so no-op.
 */
/*ARGSUSED*/
void
mach_htraptrace_setup(int cpuid)
{
	/* Setup hypervisor traptrace buffer, not applicable to sun4u */
}
4437c478bd9Sstevel@tonic-gate 
/*
 * Hypervisor traptrace enable/disable; sun4u has no hypervisor, so no-op.
 */
/*ARGSUSED*/
void
mach_htraptrace_configure(int cpuid)
{
	/* enable/ disable hypervisor traptracing, not applicable to sun4u */
}
450db6d2ee3Ssvemuri 
/*
 * Hypervisor traptrace buffer cleanup; sun4u has no hypervisor, so no-op.
 */
/*ARGSUSED*/
void
mach_htraptrace_cleanup(int cpuid)
{
	/* cleanup hypervisor traptrace buffer, not applicable to sun4u */
}
4577c478bd9Sstevel@tonic-gate 
/*
 * Machine Description framework startup initialization; sun4v only, no-op
 * here.
 */
void
mach_descrip_startup_init(void)
{
	/*
	 * Only for sun4v.
	 * Initialize Machine description framework during startup.
	 */
}
/*
 * Machine Description framework startup cleanup; sun4v only, no-op here.
 */
void
mach_descrip_startup_fini(void)
{
	/*
	 * Only for sun4v.
	 * Clean up Machine Description framework during startup.
	 */
}
4741ae08745Sheppo 
/*
 * Machine Description framework initialization; sun4v only, no-op here.
 */
void
mach_descrip_init(void)
{
	/*
	 * Only for sun4v.
	 * Initialize Machine description framework.
	 */
}
4837c478bd9Sstevel@tonic-gate 
/*
 * Hypervisor services negotiation; sun4u has no hypervisor, so no-op.
 */
void
hsvc_setup(void)
{
	/* Setup hypervisor services, not applicable to sun4u */
}
489c56c1e58Sgirish 
/*
 * Load machine-class-specific drivers; none exist for sun4u at present.
 */
void
load_mach_drivers(void)
{
	/* Currently no machine class (sun4u) specific drivers to load */
}
4951ae08745Sheppo 
4967c478bd9Sstevel@tonic-gate /*
4977c478bd9Sstevel@tonic-gate  * Return true if the machine we're running on is a Positron.
4987c478bd9Sstevel@tonic-gate  * (Positron is an unsupported developers platform.)
4997c478bd9Sstevel@tonic-gate  */
5007c478bd9Sstevel@tonic-gate int
5017c478bd9Sstevel@tonic-gate iam_positron(void)
5027c478bd9Sstevel@tonic-gate {
5037c478bd9Sstevel@tonic-gate 	char model[32];
5047c478bd9Sstevel@tonic-gate 	const char proto_model[] = "SUNW,501-2732";
505fa9e4066Sahrens 	pnode_t root = prom_rootnode();
5067c478bd9Sstevel@tonic-gate 
5077c478bd9Sstevel@tonic-gate 	if (prom_getproplen(root, "model") != sizeof (proto_model))
5087c478bd9Sstevel@tonic-gate 		return (0);
5097c478bd9Sstevel@tonic-gate 
5107c478bd9Sstevel@tonic-gate 	(void) prom_getprop(root, "model", model);
5117c478bd9Sstevel@tonic-gate 	if (strcmp(model, proto_model) == 0)
5127c478bd9Sstevel@tonic-gate 		return (1);
5137c478bd9Sstevel@tonic-gate 	return (0);
5147c478bd9Sstevel@tonic-gate }
5157c478bd9Sstevel@tonic-gate 
5167c478bd9Sstevel@tonic-gate /*
5177c478bd9Sstevel@tonic-gate  * Find a physically contiguous area of twice the largest ecache size
5187c478bd9Sstevel@tonic-gate  * to be used while doing displacement flush of ecaches.
5197c478bd9Sstevel@tonic-gate  */
5207c478bd9Sstevel@tonic-gate uint64_t
5217c478bd9Sstevel@tonic-gate ecache_flush_address(void)
5227c478bd9Sstevel@tonic-gate {
5237c478bd9Sstevel@tonic-gate 	struct memlist *pmem;
5247c478bd9Sstevel@tonic-gate 	uint64_t flush_size;
5257c478bd9Sstevel@tonic-gate 	uint64_t ret_val;
5267c478bd9Sstevel@tonic-gate 
5277c478bd9Sstevel@tonic-gate 	flush_size = ecache_size * 2;
528*56f33205SJonathan Adams 	for (pmem = phys_install; pmem; pmem = pmem->ml_next) {
529*56f33205SJonathan Adams 		ret_val = P2ROUNDUP(pmem->ml_address, ecache_size);
530*56f33205SJonathan Adams 		if (ret_val + flush_size <= pmem->ml_address + pmem->ml_size)
5317c478bd9Sstevel@tonic-gate 			return (ret_val);
5327c478bd9Sstevel@tonic-gate 	}
5337c478bd9Sstevel@tonic-gate 	return ((uint64_t)-1);
5347c478bd9Sstevel@tonic-gate }
5357c478bd9Sstevel@tonic-gate 
5367c478bd9Sstevel@tonic-gate /*
5377c478bd9Sstevel@tonic-gate  * Called with the memlist lock held to say that phys_install has
5387c478bd9Sstevel@tonic-gate  * changed.
5397c478bd9Sstevel@tonic-gate  */
5407c478bd9Sstevel@tonic-gate void
5417c478bd9Sstevel@tonic-gate phys_install_has_changed(void)
5427c478bd9Sstevel@tonic-gate {
5437c478bd9Sstevel@tonic-gate 	/*
5447c478bd9Sstevel@tonic-gate 	 * Get the new address into a temporary just in case panicking
5457c478bd9Sstevel@tonic-gate 	 * involves use of ecache_flushaddr.
5467c478bd9Sstevel@tonic-gate 	 */
5477c478bd9Sstevel@tonic-gate 	uint64_t new_addr;
5487c478bd9Sstevel@tonic-gate 
5497c478bd9Sstevel@tonic-gate 	new_addr = ecache_flush_address();
5507c478bd9Sstevel@tonic-gate 	if (new_addr == (uint64_t)-1) {
5517c478bd9Sstevel@tonic-gate 		cmn_err(CE_PANIC,
5527c478bd9Sstevel@tonic-gate 		    "ecache_flush_address(): failed, ecache_size=%x",
5537c478bd9Sstevel@tonic-gate 		    ecache_size);
5547c478bd9Sstevel@tonic-gate 		/*NOTREACHED*/
5557c478bd9Sstevel@tonic-gate 	}
5567c478bd9Sstevel@tonic-gate 	ecache_flushaddr = new_addr;
5577c478bd9Sstevel@tonic-gate 	membar_producer();
5587c478bd9Sstevel@tonic-gate }
559