14b3651bdSmh /*
24b3651bdSmh * CDDL HEADER START
34b3651bdSmh *
44b3651bdSmh * The contents of this file are subject to the terms of the
54b3651bdSmh * Common Development and Distribution License (the "License").
64b3651bdSmh * You may not use this file except in compliance with the License.
74b3651bdSmh *
84b3651bdSmh * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
94b3651bdSmh * or http://www.opensolaris.org/os/licensing.
104b3651bdSmh * See the License for the specific language governing permissions
114b3651bdSmh * and limitations under the License.
124b3651bdSmh *
134b3651bdSmh * When distributing Covered Code, include this CDDL HEADER in each
144b3651bdSmh * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
154b3651bdSmh * If applicable, add the following below this CDDL HEADER, with the
164b3651bdSmh * fields enclosed by brackets "[]" replaced with your own identifying
174b3651bdSmh * information: Portions Copyright [yyyy] [name of copyright owner]
184b3651bdSmh *
194b3651bdSmh * CDDL HEADER END
204b3651bdSmh */
214b3651bdSmh /*
227417cfdeSKuriakose Kuruvilla * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23*ab5bb018SKeith M Wesolowski * Copyright 2022 Oxide Computer Co.
244b3651bdSmh */
254b3651bdSmh
264b3651bdSmh #include <sys/x86_archext.h>
274b3651bdSmh #include <sys/machsystm.h>
284b3651bdSmh #include <sys/x_call.h>
294b3651bdSmh #include <sys/acpi/acpi.h>
304b3651bdSmh #include <sys/acpica.h>
314b3651bdSmh #include <sys/pwrnow.h>
324b3651bdSmh #include <sys/cpu_acpi.h>
334b3651bdSmh #include <sys/cpupm.h>
344b3651bdSmh #include <sys/dtrace.h>
354b3651bdSmh #include <sys/sdt.h>
364b3651bdSmh
370e751525SEric Saxe static int pwrnow_init(cpu_t *);
380e751525SEric Saxe static void pwrnow_fini(cpu_t *);
390e751525SEric Saxe static void pwrnow_power(cpuset_t, uint32_t);
40444f66e7SMark Haywood static void pwrnow_stop(cpu_t *);
417f606aceSMark Haywood
425951ced0SHans Rosenfeld static boolean_t pwrnow_cpb_supported(void);
435951ced0SHans Rosenfeld
/*
 * Interfaces for modules implementing AMD's PowerNow!.
 * This ops vector is registered with the cpupm framework, which invokes
 * these callbacks to manage P-states on PowerNow!-capable processors.
 */
cpupm_state_ops_t pwrnow_ops = {
	"PowerNow! Technology",		/* csops_name: human-readable label */
	pwrnow_init,			/* validate CPU + cache ACPI P-state data */
	pwrnow_fini,			/* release resources from pwrnow_init() */
	pwrnow_power,			/* transition a set of CPUs to a P-state */
	pwrnow_stop			/* detach P-state management from one CPU */
};
547f606aceSMark Haywood
554b3651bdSmh /*
564b3651bdSmh * Error returns
574b3651bdSmh */
584b3651bdSmh #define PWRNOW_RET_SUCCESS 0x00
594b3651bdSmh #define PWRNOW_RET_NO_PM 0x01
604b3651bdSmh #define PWRNOW_RET_UNSUP_STATE 0x02
614b3651bdSmh #define PWRNOW_RET_TRANS_INCOMPLETE 0x03
624b3651bdSmh
634b3651bdSmh #define PWRNOW_LATENCY_WAIT 10
644b3651bdSmh
654b3651bdSmh /*
664b3651bdSmh * MSR registers for changing and reading processor power state.
674b3651bdSmh */
684b3651bdSmh #define PWRNOW_PERF_CTL_MSR 0xC0010062
694b3651bdSmh #define PWRNOW_PERF_STATUS_MSR 0xC0010063
704b3651bdSmh
714b3651bdSmh #define AMD_CPUID_PSTATE_HARDWARE (1<<7)
724b3651bdSmh #define AMD_CPUID_TSC_CONSTANT (1<<8)
735951ced0SHans Rosenfeld #define AMD_CPUID_CPB (1<<9)
744b3651bdSmh
754b3651bdSmh /*
764b3651bdSmh * Debugging support
774b3651bdSmh */
784b3651bdSmh #ifdef DEBUG
794b3651bdSmh volatile int pwrnow_debug = 0;
804b3651bdSmh #define PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
814b3651bdSmh #else
824b3651bdSmh #define PWRNOW_DEBUG(arglist)
834b3651bdSmh #endif
844b3651bdSmh
854b3651bdSmh /*
864b3651bdSmh * Write the ctrl register.
874b3651bdSmh */
880e751525SEric Saxe static void
write_ctrl(cpu_acpi_handle_t handle,uint32_t ctrl)894b3651bdSmh write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
904b3651bdSmh {
914b3651bdSmh cpu_acpi_pct_t *pct_ctrl;
924b3651bdSmh uint64_t reg;
934b3651bdSmh
944b3651bdSmh pct_ctrl = CPU_ACPI_PCT_CTRL(handle);
954b3651bdSmh
967f606aceSMark Haywood switch (pct_ctrl->cr_addrspace_id) {
974b3651bdSmh case ACPI_ADR_SPACE_FIXED_HARDWARE:
984b3651bdSmh reg = ctrl;
994b3651bdSmh wrmsr(PWRNOW_PERF_CTL_MSR, reg);
1004b3651bdSmh break;
1014b3651bdSmh
1024b3651bdSmh default:
1034b3651bdSmh DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
1047f606aceSMark Haywood pct_ctrl->cr_addrspace_id);
1050e751525SEric Saxe return;
1064b3651bdSmh }
1074b3651bdSmh
1084b3651bdSmh DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
1094b3651bdSmh }
1104b3651bdSmh
1114b3651bdSmh /*
1124b3651bdSmh * Transition the current processor to the requested state.
1134b3651bdSmh */
114027bcc9fSToomas Soome static int
pwrnow_pstate_transition(xc_arg_t arg1,xc_arg_t arg2 __unused,xc_arg_t arg3 __unused)115027bcc9fSToomas Soome pwrnow_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
116027bcc9fSToomas Soome xc_arg_t arg3 __unused)
1174b3651bdSmh {
118027bcc9fSToomas Soome uint32_t req_state = (uint32_t)arg1;
1190e751525SEric Saxe cpupm_mach_state_t *mach_state =
1200e751525SEric Saxe (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
1210e751525SEric Saxe cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
1224b3651bdSmh cpu_acpi_pstate_t *req_pstate;
1234b3651bdSmh uint32_t ctrl;
1244b3651bdSmh
1257f606aceSMark Haywood req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
1267f606aceSMark Haywood req_pstate += req_state;
1270e751525SEric Saxe
1284b3651bdSmh DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
1294b3651bdSmh CPU_ACPI_FREQ(req_pstate));
1304b3651bdSmh
1314b3651bdSmh /*
1324b3651bdSmh * Initiate the processor p-state change.
1334b3651bdSmh */
1347f606aceSMark Haywood ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
1350e751525SEric Saxe write_ctrl(handle, ctrl);
1364b3651bdSmh
1375951ced0SHans Rosenfeld if (mach_state->ms_turbo != NULL)
1385951ced0SHans Rosenfeld cpupm_record_turbo_info(mach_state->ms_turbo,
1395951ced0SHans Rosenfeld mach_state->ms_pstate.cma_state.pstate, req_state);
1405951ced0SHans Rosenfeld
1410e751525SEric Saxe mach_state->ms_pstate.cma_state.pstate = req_state;
1420e751525SEric Saxe cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
143027bcc9fSToomas Soome return (0);
1444b3651bdSmh }
1454b3651bdSmh
1460e751525SEric Saxe static void
pwrnow_power(cpuset_t set,uint32_t req_state)1470e751525SEric Saxe pwrnow_power(cpuset_t set, uint32_t req_state)
1484b3651bdSmh {
149375e0503SMark Haywood /*
150375e0503SMark Haywood * If thread is already running on target CPU then just
151375e0503SMark Haywood * make the transition request. Otherwise, we'll need to
152375e0503SMark Haywood * make a cross-call.
153375e0503SMark Haywood */
1544b3651bdSmh kpreempt_disable();
1550e751525SEric Saxe if (CPU_IN_SET(set, CPU->cpu_id)) {
156027bcc9fSToomas Soome (void) pwrnow_pstate_transition(req_state, 0, 0);
1570e751525SEric Saxe CPUSET_DEL(set, CPU->cpu_id);
1580e751525SEric Saxe }
1590e751525SEric Saxe if (!CPUSET_ISNULL(set)) {
1604da99751SToomas Soome xc_call((xc_arg_t)req_state, 0, 0,
161027bcc9fSToomas Soome CPUSET2BV(set), pwrnow_pstate_transition);
162375e0503SMark Haywood }
1634b3651bdSmh kpreempt_enable();
1644b3651bdSmh }
1654b3651bdSmh
1664b3651bdSmh /*
1674b3651bdSmh * Validate that this processor supports PowerNow! and if so,
1684b3651bdSmh * get the P-state data from ACPI and cache it.
1694b3651bdSmh */
1707f606aceSMark Haywood static int
pwrnow_init(cpu_t * cp)1710e751525SEric Saxe pwrnow_init(cpu_t *cp)
1724b3651bdSmh {
1730e751525SEric Saxe cpupm_mach_state_t *mach_state =
1740e751525SEric Saxe (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
1750e751525SEric Saxe cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
1764b3651bdSmh cpu_acpi_pct_t *pct_stat;
177511588bbSYuri Pankov static int logged = 0;
1784b3651bdSmh
1790e751525SEric Saxe PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));
1804b3651bdSmh
1814b3651bdSmh /*
1827f606aceSMark Haywood * Cache the P-state specific ACPI data.
1834b3651bdSmh */
1847f606aceSMark Haywood if (cpu_acpi_cache_pstate_data(handle) != 0) {
185511588bbSYuri Pankov if (!logged) {
186511588bbSYuri Pankov cmn_err(CE_NOTE, "!PowerNow! support is being "
187511588bbSYuri Pankov "disabled due to errors parsing ACPI P-state "
188511588bbSYuri Pankov "objects exported by BIOS.");
189511588bbSYuri Pankov logged = 1;
190511588bbSYuri Pankov }
1910e751525SEric Saxe pwrnow_fini(cp);
1924b3651bdSmh return (PWRNOW_RET_NO_PM);
1934b3651bdSmh }
1944b3651bdSmh
1954b3651bdSmh pct_stat = CPU_ACPI_PCT_STATUS(handle);
1967f606aceSMark Haywood switch (pct_stat->cr_addrspace_id) {
1974b3651bdSmh case ACPI_ADR_SPACE_FIXED_HARDWARE:
1984b3651bdSmh PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
1994b3651bdSmh break;
2004b3651bdSmh default:
2014b3651bdSmh cmn_err(CE_WARN, "!_PCT configured for unsupported "
2027f606aceSMark Haywood "addrspace = %d.", pct_stat->cr_addrspace_id);
2034b3651bdSmh cmn_err(CE_NOTE, "!CPU power management will not function.");
2040e751525SEric Saxe pwrnow_fini(cp);
2054b3651bdSmh return (PWRNOW_RET_NO_PM);
2064b3651bdSmh }
2074b3651bdSmh
2080e751525SEric Saxe cpupm_alloc_domains(cp, CPUPM_P_STATES);
2094b3651bdSmh
2105951ced0SHans Rosenfeld /*
2115951ced0SHans Rosenfeld * Check for Core Performance Boost support
2125951ced0SHans Rosenfeld */
2135951ced0SHans Rosenfeld if (pwrnow_cpb_supported())
2145951ced0SHans Rosenfeld mach_state->ms_turbo = cpupm_turbo_init(cp);
2155951ced0SHans Rosenfeld
2160e751525SEric Saxe PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id))
2174b3651bdSmh return (PWRNOW_RET_SUCCESS);
2184b3651bdSmh }
2194b3651bdSmh
2204b3651bdSmh /*
2214b3651bdSmh * Free resources allocated by pwrnow_init().
2224b3651bdSmh */
2237f606aceSMark Haywood static void
pwrnow_fini(cpu_t * cp)2240e751525SEric Saxe pwrnow_fini(cpu_t *cp)
2254b3651bdSmh {
2260e751525SEric Saxe cpupm_mach_state_t *mach_state =
2270e751525SEric Saxe (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
2280e751525SEric Saxe cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
2297f606aceSMark Haywood
2300e751525SEric Saxe cpupm_free_domains(&cpupm_pstate_domains);
2317f606aceSMark Haywood cpu_acpi_free_pstate_data(handle);
2325951ced0SHans Rosenfeld
2335951ced0SHans Rosenfeld if (mach_state->ms_turbo != NULL)
2345951ced0SHans Rosenfeld cpupm_turbo_fini(mach_state->ms_turbo);
2355951ced0SHans Rosenfeld mach_state->ms_turbo = NULL;
2367f606aceSMark Haywood }
2377f606aceSMark Haywood
2387f606aceSMark Haywood boolean_t
pwrnow_supported()2397f606aceSMark Haywood pwrnow_supported()
2407f606aceSMark Haywood {
2417f606aceSMark Haywood struct cpuid_regs cpu_regs;
2427f606aceSMark Haywood
2437f606aceSMark Haywood /* Required features */
244*ab5bb018SKeith M Wesolowski ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
245*ab5bb018SKeith M Wesolowski if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
2467f606aceSMark Haywood PWRNOW_DEBUG(("No CPUID or MSR support."));
2477f606aceSMark Haywood return (B_FALSE);
2487f606aceSMark Haywood }
2497f606aceSMark Haywood
2507f606aceSMark Haywood /*
2517f606aceSMark Haywood * Get the Advanced Power Management Information.
2527f606aceSMark Haywood */
2537f606aceSMark Haywood cpu_regs.cp_eax = 0x80000007;
2547f606aceSMark Haywood (void) __cpuid_insn(&cpu_regs);
2557f606aceSMark Haywood
2567f606aceSMark Haywood /*
2577f606aceSMark Haywood * We currently only support CPU power management of
2587f606aceSMark Haywood * processors that are P-state TSC invariant
2597f606aceSMark Haywood */
2607f606aceSMark Haywood if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
2617f606aceSMark Haywood PWRNOW_DEBUG(("No support for CPUs that are not P-state "
2627f606aceSMark Haywood "TSC invariant.\n"));
2637f606aceSMark Haywood return (B_FALSE);
2647f606aceSMark Haywood }
2657f606aceSMark Haywood
2667f606aceSMark Haywood /*
2677f606aceSMark Haywood * We only support the "Fire and Forget" style of PowerNow! (i.e.,
2687f606aceSMark Haywood * single MSR write to change speed).
2697f606aceSMark Haywood */
2707f606aceSMark Haywood if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
2717f606aceSMark Haywood PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
2727f606aceSMark Haywood return (B_FALSE);
2737f606aceSMark Haywood }
2747f606aceSMark Haywood return (B_TRUE);
2754b3651bdSmh }
276444f66e7SMark Haywood
2775951ced0SHans Rosenfeld static boolean_t
pwrnow_cpb_supported(void)2785951ced0SHans Rosenfeld pwrnow_cpb_supported(void)
2795951ced0SHans Rosenfeld {
2805951ced0SHans Rosenfeld struct cpuid_regs cpu_regs;
2815951ced0SHans Rosenfeld
2825951ced0SHans Rosenfeld /* Required features */
283*ab5bb018SKeith M Wesolowski ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
284*ab5bb018SKeith M Wesolowski if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
2855951ced0SHans Rosenfeld PWRNOW_DEBUG(("No CPUID or MSR support."));
2865951ced0SHans Rosenfeld return (B_FALSE);
2875951ced0SHans Rosenfeld }
2885951ced0SHans Rosenfeld
2895951ced0SHans Rosenfeld /*
2905951ced0SHans Rosenfeld * Get the Advanced Power Management Information.
2915951ced0SHans Rosenfeld */
2925951ced0SHans Rosenfeld cpu_regs.cp_eax = 0x80000007;
2935951ced0SHans Rosenfeld (void) __cpuid_insn(&cpu_regs);
2945951ced0SHans Rosenfeld
2955951ced0SHans Rosenfeld if (!(cpu_regs.cp_edx & AMD_CPUID_CPB))
2965951ced0SHans Rosenfeld return (B_FALSE);
2975951ced0SHans Rosenfeld
2985951ced0SHans Rosenfeld return (B_TRUE);
2995951ced0SHans Rosenfeld }
3005951ced0SHans Rosenfeld
301444f66e7SMark Haywood static void
pwrnow_stop(cpu_t * cp)302444f66e7SMark Haywood pwrnow_stop(cpu_t *cp)
303444f66e7SMark Haywood {
304444f66e7SMark Haywood cpupm_mach_state_t *mach_state =
305444f66e7SMark Haywood (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
306444f66e7SMark Haywood cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
307444f66e7SMark Haywood
308444f66e7SMark Haywood cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
309444f66e7SMark Haywood cpu_acpi_free_pstate_data(handle);
3105951ced0SHans Rosenfeld
3115951ced0SHans Rosenfeld if (mach_state->ms_turbo != NULL)
3125951ced0SHans Rosenfeld cpupm_turbo_fini(mach_state->ms_turbo);
3135951ced0SHans Rosenfeld mach_state->ms_turbo = NULL;
314444f66e7SMark Haywood }
315