xref: /illumos-gate/usr/src/uts/i86pc/os/cpupm/speedstep.c (revision e5bbdc06902032d2a59f8d57eb4acf8dac4cbca3)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/speedstep.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/*
 * Turbo-related structure definitions.
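 *
 * When turbo mode is supported, speedstep_init() allocates one
 * cpupm_turbo_info_t per CPU and hangs it off the machine state's
 * ms_vendor pointer; turbo_kstat_t lays out the named kstats that
 * export its counters.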
 */
typedef struct cpupm_turbo_info {
	kstat_t		*turbo_ksp;		/* turbo kstat */
	int		in_turbo;		/* in turbo? */
	int		turbo_supported;	/* turbo flag */
	uint64_t	t_mcnt;			/* turbo mcnt */
	uint64_t	t_acnt;			/* turbo acnt */
} cpupm_turbo_info_t;

typedef struct turbo_kstat_s {
	struct kstat_named	turbo_supported;	/* turbo flag */
	struct kstat_named	t_mcnt;			/* IA32_MPERF_MSR */
	struct kstat_named	t_acnt;			/* IA32_APERF_MSR */
} turbo_kstat_t;

static int speedstep_init(cpu_t *);
static void speedstep_fini(cpu_t *);
static void speedstep_power(cpuset_t, uint32_t);
static boolean_t turbo_supported(void);
static int turbo_kstat_update(kstat_t *, int);
static void get_turbo_info(cpupm_turbo_info_t *);
static void reset_turbo_info(void);
static void record_turbo_info(cpupm_turbo_info_t *, uint32_t, uint32_t);
static void update_turbo_info(cpupm_turbo_info_t *);

/*
 * Interfaces for modules implementing Intel's Enhanced SpeedStep.
 */
cpupm_state_ops_t speedstep_ops = {
	"Enhanced SpeedStep Technology",
	speedstep_init,
	speedstep_fini,
	speedstep_power
};

/*
 * Error returns
 */
#define	ESS_RET_SUCCESS		0x00
#define	ESS_RET_NO_PM		0x01
#define	ESS_RET_UNSUP_STATE	0x02

/*
 * MSR registers for changing and reading processor power state.
 */
#define	IA32_PERF_STAT_MSR		0x198
#define	IA32_PERF_CTL_MSR		0x199

#define	IA32_CPUID_TSC_CONSTANT		0xF30
#define	IA32_MISC_ENABLE_MSR		0x1A0
#define	IA32_MISC_ENABLE_EST		(1<<16)
#define	IA32_MISC_ENABLE_CXE		(1<<25)

#define	CPUID_TURBO_SUPPORT		(1 << 1)
#define	CPU_ACPI_P0			0
#define	CPU_IN_TURBO			1

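/*
 * P0 is the turbo-eligible P-state: when turbo mode is supported,
 * requesting the highest performance state allows the hardware to run
 * opportunistically above the guaranteed base frequency, so turbo
 * residency is accounted while the current P-state is P0.
 */
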
/*
 * MSRs for the hardware coordination feedback mechanism
 *   - IA32_MPERF: increments in proportion to a fixed frequency
 *   - IA32_APERF: increments in proportion to actual performance
 */
#define	IA32_MPERF_MSR			0xE7
#define	IA32_APERF_MSR			0xE8
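
/*
 * Over a sampling interval, the ratio of the APERF count to the MPERF
 * count approximates the average effective frequency relative to the
 * fixed reference, so a ratio above 1 while resident in P0 indicates
 * that turbo mode was engaged.
 */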

/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int ess_debug = 0;
#define	ESSDEBUG(arglist) if (ess_debug) printf arglist;
#else
#define	ESSDEBUG(arglist)
#endif

static kmutex_t turbo_mutex;

turbo_kstat_t turbo_kstat = {
	{ "turbo_supported",	KSTAT_DATA_UINT32 },
	{ "turbo_mcnt",		KSTAT_DATA_UINT64 },
	{ "turbo_acnt",		KSTAT_DATA_UINT64 },
};
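
/*
 * All CPUs share this single named-kstat buffer: each per-CPU turbo
 * kstat is created with KSTAT_FLAG_VIRTUAL and points its ks_data here,
 * and updates are serialized by turbo_mutex.  The counters can be read
 * from userland with kstat(1M), e.g. "kstat -m turbo", or with
 * libkstat.  A minimal illustrative sketch (not part of this module;
 * assumes the kstat exported for CPU 0 and linking with -lkstat):
 *
 *	#include <kstat.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		kstat_ctl_t *kc = kstat_open();
 *		kstat_t *ksp;
 *		kstat_named_t *m, *a;
 *
 *		if (kc == NULL ||
 *		    (ksp = kstat_lookup(kc, "turbo", 0, "turbo")) == NULL ||
 *		    kstat_read(kc, ksp, NULL) == -1)
 *			return (1);
 *		m = kstat_data_lookup(ksp, "turbo_mcnt");
 *		a = kstat_data_lookup(ksp, "turbo_acnt");
 *		if (m == NULL || a == NULL)
 *			return (1);
 *		(void) printf("mcnt=%llu acnt=%llu\n",
 *		    (u_longlong_t)m->value.ui64,
 *		    (u_longlong_t)a->value.ui64);
 *		return (0);
 *	}
 */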

/*
 * kstat update function for the turbo mode info
 */
static int
turbo_kstat_update(kstat_t *ksp, int flag)
{
	cpupm_turbo_info_t *turbo_info = ksp->ks_private;

	if (flag == KSTAT_WRITE) {
		return (EACCES);
	}

	/*
	 * Update the counts in case the CPU has been in turbo
	 * mode for a long time.
	 */
	if (turbo_info->in_turbo == CPU_IN_TURBO)
		update_turbo_info(turbo_info);

	turbo_kstat.turbo_supported.value.ui32 =
	    turbo_info->turbo_supported;
	turbo_kstat.t_mcnt.value.ui64 = turbo_info->t_mcnt;
	turbo_kstat.t_acnt.value.ui64 = turbo_info->t_acnt;

	return (0);
}

/*
 * Accumulate the current MPERF/APERF counts into the running totals.
 */
static void
get_turbo_info(cpupm_turbo_info_t *turbo_info)
{
	ulong_t		iflag;
	uint64_t	mcnt, acnt;

	iflag = intr_clear();
	mcnt = rdmsr(IA32_MPERF_MSR);
	acnt = rdmsr(IA32_APERF_MSR);
	turbo_info->t_mcnt += mcnt;
	turbo_info->t_acnt += acnt;
	intr_restore(iflag);
}

/*
 * Clear the MPERF/APERF MSRs.
 */
static void
reset_turbo_info(void)
{
	ulong_t		iflag;

	iflag = intr_clear();
	wrmsr(IA32_MPERF_MSR, 0);
	wrmsr(IA32_APERF_MSR, 0);
	intr_restore(iflag);
}

/*
 * Record turbo accounting on a P-state transition into or out of
 * CPU_ACPI_P0.
 */
static void
record_turbo_info(cpupm_turbo_info_t *turbo_info,
    uint32_t cur_state, uint32_t req_state)
{
	if (!turbo_info->turbo_supported)
		return;
	/*
	 * Entering P0 state.
	 */
	if (req_state == CPU_ACPI_P0) {
		reset_turbo_info();
		turbo_info->in_turbo = CPU_IN_TURBO;
	}
	/*
	 * Leaving P0 state.
	 */
	else if (cur_state == CPU_ACPI_P0) {
		turbo_info->in_turbo = 0;
		get_turbo_info(turbo_info);
	}
}

/*
 * Update the sum of counts and clear the MSRs.
 */
static void
update_turbo_info(cpupm_turbo_info_t *turbo_info)
{
	ulong_t		iflag;
	uint64_t	mcnt, acnt;

	iflag = intr_clear();
	mcnt = rdmsr(IA32_MPERF_MSR);
	acnt = rdmsr(IA32_APERF_MSR);
	wrmsr(IA32_MPERF_MSR, 0);
	wrmsr(IA32_APERF_MSR, 0);
	turbo_info->t_mcnt += mcnt;
	turbo_info->t_acnt += acnt;
	intr_restore(iflag);
}

/*
 * Write the ctrl register. How it is written depends upon the _PCT
 * ACPI object value.
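 *
 * For the FIXED_HARDWARE case, only the low 16 bits of IA32_PERF_CTL
 * select the target performance state; the _PSS control value supplies
 * those bits while the remaining (reserved) bits are preserved.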
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl;
	uint64_t reg;

	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	switch (pct_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/*
		 * Read current power state because reserved bits must be
		 * preserved, compose new value, and write it.
		 */
		reg = rdmsr(IA32_PERF_CTL_MSR);
		reg &= ~((uint64_t)0xFFFF);
		reg |= ctrl;
		wrmsr(IA32_PERF_CTL_MSR, reg);
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		(void) cpu_acpi_write_port(pct_ctrl->cr_address, ctrl,
		    pct_ctrl->cr_width);
		break;

	default:
		DTRACE_PROBE1(ess_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	DTRACE_PROBE1(ess_ctrl_write, uint32_t, ctrl);
}

/*
 * Transition the current processor to the requested state.
 */
void
speedstep_pstate_transition(uint32_t req_state)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;
	cpupm_turbo_info_t *turbo_info =
	    (cpupm_turbo_info_t *)(mach_state->ms_vendor);

	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(ess_transition, uint32_t, CPU_ACPI_FREQ(req_pstate));

	/*
	 * Initiate the processor p-state change.
	 */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

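	/*
	 * Account turbo residency against the pre-transition P-state;
	 * the cached P-state is only updated after this bookkeeping.
	 */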
	if (turbo_info)
		record_turbo_info(turbo_info,
		    mach_state->ms_pstate.cma_state.pstate, req_state);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
}

static void
speedstep_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If the current thread is running on one of the target CPUs,
	 * make the transition request directly.  The remaining CPUs
	 * are reached with a cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		speedstep_pstate_transition(req_state);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, NULL, NULL, X_CALL_HIPRI, set,
		    (xc_func_t)speedstep_pstate_transition);
	}
	kpreempt_enable();
}

/*
 * Validate that this processor supports Speedstep and if so,
 * get the P-state data from ACPI and cache it.
 */
static int
speedstep_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;
	cpupm_turbo_info_t *turbo_info;

	ESSDEBUG(("speedstep_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the P-state specific ACPI data.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		ESSDEBUG(("Failed to cache ACPI data\n"));
		speedstep_fini(cp);
		return (ESS_RET_NO_PM);
	}

	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ESSDEBUG(("Transitions will use fixed hardware\n"));
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		ESSDEBUG(("Transitions will use system IO\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		speedstep_fini(cp);
		return (ESS_RET_NO_PM);
	}

	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	if (!turbo_supported()) {
		mach_state->ms_vendor = NULL;
		goto ess_ret_success;
	}
	/*
	 * turbo mode supported
	 */
	turbo_info = mach_state->ms_vendor =
	    kmem_zalloc(sizeof (cpupm_turbo_info_t), KM_SLEEP);
	turbo_info->turbo_supported = 1;
	turbo_info->turbo_ksp = kstat_create("turbo", cp->cpu_id,
	    "turbo", "misc", KSTAT_TYPE_NAMED,
	    sizeof (turbo_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (turbo_info->turbo_ksp == NULL) {
		cmn_err(CE_NOTE, "kstat_create(turbo) failed");
	} else {
		turbo_info->turbo_ksp->ks_data = &turbo_kstat;
		turbo_info->turbo_ksp->ks_lock = &turbo_mutex;
		turbo_info->turbo_ksp->ks_update = turbo_kstat_update;
		turbo_info->turbo_ksp->ks_data_size += MAXNAMELEN;
		turbo_info->turbo_ksp->ks_private = turbo_info;

		kstat_install(turbo_info->turbo_ksp);
	}

ess_ret_success:

	ESSDEBUG(("Processor %d succeeded.\n", cp->cpu_id))
	return (ESS_RET_SUCCESS);
}

/*
 * Free resources allocated by speedstep_init().
 */
static void
speedstep_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpupm_turbo_info_t *turbo_info =
	    (cpupm_turbo_info_t *)(mach_state->ms_vendor);

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

	if (turbo_info) {
		if (turbo_info->turbo_ksp != NULL)
			kstat_delete(turbo_info->turbo_ksp);
		kmem_free(turbo_info, sizeof (cpupm_turbo_info_t));
	}
}

boolean_t
speedstep_supported(uint_t family, uint_t model)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!(x86_feature & X86_CPUID) ||
	    !(x86_feature & X86_MSR)) {
		return (B_FALSE);
	}

	/*
	 * We only support family/model combinations which
	 * are P-state TSC invariant.
	 */
	if (!((family == 0xf && model >= 0x3) ||
	    (family == 0x6 && model >= 0xe))) {
		return (B_FALSE);
	}

	/*
	 * Enhanced SpeedStep supported?
	 */
	cpu_regs.cp_eax = 0x1;
	(void) __cpuid_insn(&cpu_regs);
	if (!(cpu_regs.cp_ecx & CPUID_INTC_ECX_EST)) {
		return (B_FALSE);
	}

	return (B_TRUE);
}

boolean_t
turbo_supported(void)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!(x86_feature & X86_CPUID) ||
	    !(x86_feature & X86_MSR)) {
		return (B_FALSE);
	}

	/*
	 * Turbo mode supported? (CPUID leaf 6, bit 1 of %eax)
	 */
	cpu_regs.cp_eax = 0x6;
	(void) __cpuid_insn(&cpu_regs);
	if (!(cpu_regs.cp_eax & CPUID_TURBO_SUPPORT)) {
		return (B_FALSE);
	}

	return (B_TRUE);
}