17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
51ae08745Sheppo * Common Development and Distribution License (the "License").
61ae08745Sheppo * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
211ae08745Sheppo
227c478bd9Sstevel@tonic-gate /*
231ae08745Sheppo * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
247c478bd9Sstevel@tonic-gate * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate */
267c478bd9Sstevel@tonic-gate
277c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI"
287c478bd9Sstevel@tonic-gate
291ae08745Sheppo #include <sys/cpuvar.h>
307c478bd9Sstevel@tonic-gate #include <sys/cpu_module.h>
311ae08745Sheppo #include <sys/machsystm.h>
321ae08745Sheppo #include <sys/archsystm.h>
331ae08745Sheppo #include <sys/prom_plat.h>
341ae08745Sheppo #include <sys/hypervisor_api.h>
351ae08745Sheppo #include <sys/hsvc.h>
361ae08745Sheppo
371ae08745Sheppo extern uint64_t xc_tick_limit;
381ae08745Sheppo extern uint64_t xc_tick_jump_limit;
391ae08745Sheppo
401ae08745Sheppo extern void cpu_intrq_unregister_powerdown(uint64_t doneflag_va);
417c478bd9Sstevel@tonic-gate
/*
 * set_idle_cpu is called from idle() when a CPU becomes idle.
 * There is no platform-specific work to do on sun4v when a CPU
 * goes idle, so this is an intentionally empty stub required to
 * satisfy the platform interface.
 */
/*ARGSUSED*/
void
set_idle_cpu(int cpun)
{
}
507c478bd9Sstevel@tonic-gate
/*
 * unset_idle_cpu is called from idle() when a CPU is no longer idle.
 * As with set_idle_cpu(), no platform-specific action is needed on
 * sun4v; this empty stub satisfies the platform interface.
 */
/*ARGSUSED*/
void
unset_idle_cpu(int cpun)
{
}
591ae08745Sheppo
601ae08745Sheppo /*
611ae08745Sheppo * Stop a CPU based on its cpuid, using the cpu_stop hypervisor call.
621ae08745Sheppo * Since this requires that the hypervisor force a remote CPU to stop,
631ae08745Sheppo * the assumption is made that this should take roughly the same amount
64*f273041fSjm * of time as a executing a cross-call. Consequently, the xcall
65*f273041fSjm * timeout is used to determine when to give up waiting for the CPU to
66*f273041fSjm * stop.
671ae08745Sheppo *
681ae08745Sheppo * Attempts to stop a CPU already in the stopped or error state will
691ae08745Sheppo * silently succeed. Zero is returned on success and a non-negative
701ae08745Sheppo * errno value is returned on failure.
711ae08745Sheppo */
721ae08745Sheppo int
stopcpu_bycpuid(int cpuid)731ae08745Sheppo stopcpu_bycpuid(int cpuid)
741ae08745Sheppo {
751ae08745Sheppo uint64_t loop_cnt;
761ae08745Sheppo uint64_t state;
771ae08745Sheppo uint64_t rv;
781ae08745Sheppo uint64_t major = 0;
791ae08745Sheppo uint64_t minor = 0;
801ae08745Sheppo uint64_t cpu_stop_time_limit;
81*f273041fSjm extern uint64_t xc_func_time_limit;
821ae08745Sheppo
831ae08745Sheppo ASSERT(MUTEX_HELD(&cpu_lock));
841ae08745Sheppo
851ae08745Sheppo /*
861ae08745Sheppo * Check the state of the CPU up front to see if an
871ae08745Sheppo * attempt to stop it is even necessary.
881ae08745Sheppo */
891ae08745Sheppo if (hv_cpu_state(cpuid, &state) != H_EOK)
901ae08745Sheppo return (EINVAL);
911ae08745Sheppo
921ae08745Sheppo /* treat stopped and error state the same */
931ae08745Sheppo if (state != CPU_STATE_RUNNING) {
941ae08745Sheppo /* nothing to do */
951ae08745Sheppo return (0);
961ae08745Sheppo }
971ae08745Sheppo
981ae08745Sheppo /*
991ae08745Sheppo * The HV API to stop a CPU is only supported in
1001ae08745Sheppo * version 1.1 and later of the core group. If an
1011ae08745Sheppo * older version of the HV is in use, return not
1021ae08745Sheppo * supported.
1031ae08745Sheppo */
1041ae08745Sheppo if (hsvc_version(HSVC_GROUP_CORE, &major, &minor) != 0)
1051ae08745Sheppo return (EINVAL);
1061ae08745Sheppo
1071ae08745Sheppo ASSERT(major != 0);
1081ae08745Sheppo
1091ae08745Sheppo if ((major == 1) && (minor < 1))
1101ae08745Sheppo return (ENOTSUP);
1111ae08745Sheppo
1121ae08745Sheppo /* use the mondo timeout if it has been initialized */
113*f273041fSjm cpu_stop_time_limit = xc_func_time_limit;
1141ae08745Sheppo
1151ae08745Sheppo /*
1161ae08745Sheppo * If called early in boot before the mondo time limit
1171ae08745Sheppo * is set, use a reasonable timeout based on the the
1181ae08745Sheppo * clock frequency of the current CPU.
1191ae08745Sheppo */
1201ae08745Sheppo if (cpu_stop_time_limit == 0)
1211ae08745Sheppo cpu_stop_time_limit = cpunodes[CPU->cpu_id].clock_freq;
1221ae08745Sheppo
1231ae08745Sheppo /* should only fail if called too early in boot */
1241ae08745Sheppo ASSERT(cpu_stop_time_limit > 0);
1251ae08745Sheppo
1261ae08745Sheppo loop_cnt = 0;
1271ae08745Sheppo
1281ae08745Sheppo /*
1291ae08745Sheppo * Attempt to stop the CPU, retrying if it is busy.
1301ae08745Sheppo */
1311ae08745Sheppo while (loop_cnt++ < cpu_stop_time_limit) {
1321ae08745Sheppo
1331ae08745Sheppo if ((rv = hv_cpu_stop(cpuid)) != H_EWOULDBLOCK)
1341ae08745Sheppo break;
1351ae08745Sheppo }
1361ae08745Sheppo
1371ae08745Sheppo if (loop_cnt == cpu_stop_time_limit)
1381ae08745Sheppo return (ETIMEDOUT);
1391ae08745Sheppo
1401ae08745Sheppo if (rv != H_EOK)
1411ae08745Sheppo return (EINVAL);
1421ae08745Sheppo
1431ae08745Sheppo /*
1441ae08745Sheppo * Verify that the CPU has reached the stopped state.
1451ae08745Sheppo */
1461ae08745Sheppo while (loop_cnt++ < cpu_stop_time_limit) {
1471ae08745Sheppo
1481ae08745Sheppo if (hv_cpu_state(cpuid, &state) != H_EOK)
1491ae08745Sheppo return (EINVAL);
1501ae08745Sheppo
1511ae08745Sheppo /* treat stopped and error state the same */
1521ae08745Sheppo if (state != CPU_STATE_RUNNING)
1531ae08745Sheppo break;
1541ae08745Sheppo }
1551ae08745Sheppo
1561ae08745Sheppo return ((loop_cnt == cpu_stop_time_limit) ? ETIMEDOUT : 0);
1571ae08745Sheppo }
1581ae08745Sheppo
1591ae08745Sheppo /*
1601ae08745Sheppo * X-trap to the target to unregister its interrupt and error queues
1611ae08745Sheppo * and put it in a safe place just before the CPU is stopped. After
1621ae08745Sheppo * unregistering its queues, the target CPU must not return from the
1631ae08745Sheppo * trap to priv or user context. Ensure that the interrupt CPU unregister
1641ae08745Sheppo * succeeded.
1651ae08745Sheppo */
1661ae08745Sheppo void
xt_cpu_unreg_powerdown(struct cpu * cpup)1671ae08745Sheppo xt_cpu_unreg_powerdown(struct cpu *cpup)
1681ae08745Sheppo {
1691ae08745Sheppo uint8_t volatile not_done;
1701ae08745Sheppo uint64_t starttick, endtick, tick, lasttick;
1711ae08745Sheppo processorid_t cpuid = cpup->cpu_id;
1721ae08745Sheppo
1731ae08745Sheppo kpreempt_disable();
1741ae08745Sheppo
1751ae08745Sheppo /*
1761ae08745Sheppo * Sun4v uses a queue for receiving mondos. Successful
1771ae08745Sheppo * transmission of a mondo only indicates that the mondo
1781ae08745Sheppo * has been written into the queue.
1791ae08745Sheppo *
1801ae08745Sheppo * Set the not_done flag to 1 before sending the cross
1811ae08745Sheppo * trap and wait until the other cpu resets it to 0.
1821ae08745Sheppo */
1831ae08745Sheppo
1841ae08745Sheppo not_done = 1;
1851ae08745Sheppo
1861ae08745Sheppo xt_one_unchecked(cpuid, (xcfunc_t *)cpu_intrq_unregister_powerdown,
1871ae08745Sheppo (uint64_t)¬_done, 0);
1881ae08745Sheppo
1891ae08745Sheppo starttick = lasttick = gettick();
1901ae08745Sheppo endtick = starttick + xc_tick_limit;
1911ae08745Sheppo
1921ae08745Sheppo while (not_done) {
1931ae08745Sheppo
1941ae08745Sheppo tick = gettick();
1951ae08745Sheppo
1961ae08745Sheppo /*
1971ae08745Sheppo * If there is a big jump between the current tick
1981ae08745Sheppo * count and lasttick, we have probably hit a break
1991ae08745Sheppo * point. Adjust endtick accordingly to avoid panic.
2001ae08745Sheppo */
2011ae08745Sheppo if (tick > (lasttick + xc_tick_jump_limit)) {
2021ae08745Sheppo endtick += (tick - lasttick);
2031ae08745Sheppo }
2041ae08745Sheppo
2051ae08745Sheppo lasttick = tick;
2061ae08745Sheppo if (tick > endtick) {
2071ae08745Sheppo cmn_err(CE_CONT, "Cross trap timeout at cpu id %x\n",
2081ae08745Sheppo cpuid);
2091ae08745Sheppo cmn_err(CE_WARN, "xt_intrq_unreg_powerdown: timeout");
2101ae08745Sheppo }
2111ae08745Sheppo }
2121ae08745Sheppo
2131ae08745Sheppo kpreempt_enable();
2141ae08745Sheppo }
2151ae08745Sheppo
/*
 * Power off the given CPU: quiesce it, x-trap it into a safe state
 * with its interrupt and error queues unregistered, and then stop it
 * via the hypervisor.  Returns 0 on success, -1 on failure; on
 * failure the CPU is marked faulted so it cannot be brought back
 * online.  Must be called with cpu_lock held.
 */
int
plat_cpu_poweroff(struct cpu *cp)
{
	int rv = 0;
	int status;
	processorid_t cpuid = cp->cpu_id;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPUs remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */
	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by setting
	 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set)
	 * to prevent it from receiving cross calls and cross traps. This
	 * prevents the processor from receiving any new soft interrupts.
	 */
	mp_cpu_quiesce(cp);

	/*
	 * Send a cross trap to the cpu to unregister its interrupt
	 * and error queues.
	 */
	xt_cpu_unreg_powerdown(cp);

	/* mark powered off before the HV call so the state is consistent */
	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	/* call into the Hypervisor to stop the CPU */
	if ((status = stopcpu_bycpuid(cpuid)) != 0) {
		rv = -1;
	}

	/* resume the CPUs paused above regardless of the outcome */
	start_cpus();

	if (rv != 0) {
		cmn_err(CE_WARN, "failed to stop cpu %d (%d)", cpuid, status);
		/* mark the CPU faulted so that it cannot be onlined */
		cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_FAULTED;
	}

	return (rv);
}
2671ae08745Sheppo
2681ae08745Sheppo int
plat_cpu_poweron(struct cpu * cp)2691ae08745Sheppo plat_cpu_poweron(struct cpu *cp)
2701ae08745Sheppo {
2711ae08745Sheppo extern void restart_other_cpu(int);
2721ae08745Sheppo
2731ae08745Sheppo ASSERT(MUTEX_HELD(&cpu_lock));
2741ae08745Sheppo
2751ae08745Sheppo cp->cpu_flags &= ~CPU_POWEROFF;
2761ae08745Sheppo
2771ae08745Sheppo restart_other_cpu(cp->cpu_id);
2781ae08745Sheppo
2791ae08745Sheppo return (0);
2801ae08745Sheppo }
281