/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/pwrnow.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

static int pwrnow_init(cpu_t *);
static void pwrnow_fini(cpu_t *);
static void pwrnow_power(cpuset_t, uint32_t);
static void pwrnow_stop(cpu_t *);

static boolean_t pwrnow_cpb_supported(void);

/*
 * Interfaces for modules implementing AMD's PowerNow!.
 */
cpupm_state_ops_t pwrnow_ops = {
	"PowerNow! Technology",
	pwrnow_init,
	pwrnow_fini,
	pwrnow_power,
	pwrnow_stop
};

/*
 * Error returns
 */
#define	PWRNOW_RET_SUCCESS		0x00
#define	PWRNOW_RET_NO_PM		0x01
#define	PWRNOW_RET_UNSUP_STATE		0x02
#define	PWRNOW_RET_TRANS_INCOMPLETE	0x03

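/*
 * Wait period for a P-state transition to complete (presumably in
 * microseconds); currently unused in this file.
 */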
#define	PWRNOW_LATENCY_WAIT		10

/*
 * MSR registers for changing and reading processor power state.
 */
#define	PWRNOW_PERF_CTL_MSR		0xC0010062
#define	PWRNOW_PERF_STATUS_MSR		0xC0010063

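/*
 * Feature bits returned in EDX by CPUID function 0x80000007 (Advanced
 * Power Management Information): hardware P-state control, invariant
 * TSC and Core Performance Boost (CPB).
 */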
#define	AMD_CPUID_PSTATE_HARDWARE	(1<<7)
#define	AMD_CPUID_TSC_CONSTANT		(1<<8)
#define	AMD_CPUID_CPB			(1<<9)

/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int pwrnow_debug = 0;
#define	PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
#else
#define	PWRNOW_DEBUG(arglist)
#endif

/*
 * Write the _PCT control register to initiate a P-state change.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl;
	uint64_t reg;

	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	switch (pct_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		reg = ctrl;
		wrmsr(PWRNOW_PERF_CTL_MSR, reg);
		break;

	default:
		DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
}

/*
 * Transition the current processor to the requested P-state.  This is
 * the handler invoked (directly or via cross-call) from pwrnow_power();
 * arg1 carries the index of the requested P-state.
 */
static int
pwrnow_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
    xc_arg_t arg3 __unused)
{
	uint32_t req_state = (uint32_t)arg1;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;

	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
	    CPU_ACPI_FREQ(req_pstate));

	/*
	 * Initiate the processor P-state change.
	 */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

	if (mach_state->ms_turbo != NULL)
		cpupm_record_turbo_info(mach_state->ms_turbo,
		    mach_state->ms_pstate.cma_state.pstate, req_state);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
	return (0);
}

static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If the current thread is already running on a target CPU,
	 * make the transition request directly.  Otherwise, we'll need
	 * to make a cross-call.
	 */
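	/*
	 * Disabling kernel preemption keeps us bound to the current CPU,
	 * so the CPU_IN_SET() check below remains valid while the direct
	 * transition (if any) is made; the remaining CPUs in the set are
	 * reached via cross-call.
	 */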
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		(void) pwrnow_pstate_transition(req_state, 0, 0);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, 0, 0,
		    CPUSET2BV(set), pwrnow_pstate_transition);
	}
	kpreempt_enable();
}

/*
 * Validate that this processor supports PowerNow! and if so,
 * get the P-state data from ACPI and cache it.
 */
static int
pwrnow_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;
	static int logged = 0;

	PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the P-state specific ACPI data.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		if (!logged) {
			cmn_err(CE_NOTE, "!PowerNow! support is being "
			    "disabled due to errors parsing ACPI P-state "
			    "objects exported by BIOS.");
			logged = 1;
		}
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

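	/*
	 * Associate this CPU with its P-state dependency domain (derived
	 * from the ACPI _PSD object when one is present).
	 */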
	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	/*
	 * Check for Core Performance Boost support
	 */
	if (pwrnow_cpb_supported())
		mach_state->ms_turbo = cpupm_turbo_init(cp);

	PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id))
	return (PWRNOW_RET_SUCCESS);
}

/*
 * Free resources allocated by pwrnow_init().
 */
static void
pwrnow_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

	if (mach_state->ms_turbo != NULL)
		cpupm_turbo_fini(mach_state->ms_turbo);
	mach_state->ms_turbo = NULL;
}

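/*
 * Determine whether the current processor supports PowerNow! as used by
 * this module: hardware P-state control ("fire and forget") with a
 * P-state invariant TSC.
 */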
boolean_t
pwrnow_supported(void)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    !is_x86_feature(x86_featureset, X86FSET_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	/*
	 * We currently only support CPU power management of
	 * processors that are P-state TSC invariant.
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
		PWRNOW_DEBUG(("No support for CPUs that are not P-state "
		    "TSC invariant.\n"));
		return (B_FALSE);
	}

	/*
	 * We only support the "Fire and Forget" style of PowerNow! (i.e.,
	 * single MSR write to change speed).
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
		PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
		return (B_FALSE);
	}
	return (B_TRUE);
}

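/*
 * Returns B_TRUE if the processor advertises Core Performance Boost
 * (CPB) in CPUID function 0x80000007.
 */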
static boolean_t
pwrnow_cpb_supported(void)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    !is_x86_feature(x86_featureset, X86FSET_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	if (!(cpu_regs.cp_edx & AMD_CPUID_CPB))
		return (B_FALSE);

	return (B_TRUE);
}

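/*
 * Stop P-state management for a single CPU: detach it from its P-state
 * domain, free its cached ACPI P-state data and release any Core
 * Performance Boost state.
 */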
static void
pwrnow_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

	if (mach_state->ms_turbo != NULL)
		cpupm_turbo_fini(mach_state->ms_turbo);
	mach_state->ms_turbo = NULL;
}