1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/x86_archext.h>
27 #include <sys/machsystm.h>
28 #include <sys/x_call.h>
29 #include <sys/cpu_acpi.h>
30 #include <sys/cpupm_throttle.h>
31 #include <sys/dtrace.h>
32 #include <sys/sdt.h>
33 
34 static int cpupm_throttle_init(cpu_t *);
35 static void cpupm_throttle_fini(cpu_t *);
36 static void cpupm_throttle(cpuset_t,  uint32_t);
37 static void cpupm_throttle_stop(cpu_t *);
38 
/*
 * Ops vector handed to the cpupm framework for generic ACPI T-state
 * (throttling) support: a human-readable label followed by the init,
 * fini, state-change, and stop entry points defined below.
 */
cpupm_state_ops_t cpupm_throttle_ops = {
	"Generic ACPI T-state Support",
	cpupm_throttle_init,
	cpupm_throttle_fini,
	cpupm_throttle,
	cpupm_throttle_stop
};
46 
47 /*
48  * Error returns
49  */
50 #define	THROTTLE_RET_SUCCESS		0x00
51 #define	THROTTLE_RET_INCOMPLETE_DATA	0x01
52 #define	THROTTLE_RET_UNSUP_STATE	0x02
53 #define	THROTTLE_RET_TRANS_INCOMPLETE	0x03
54 
55 #define	THROTTLE_LATENCY_WAIT		1
56 
57 /*
58  * MSR register for clock modulation
59  */
60 #define	IA32_CLOCK_MODULATION_MSR	0x19A
61 
62 /*
63  * Debugging support
64  */
65 #ifdef  DEBUG
66 volatile int cpupm_throttle_debug = 0;
67 #define	CTDEBUG(arglist) if (cpupm_throttle_debug) printf arglist;
68 #else
69 #define	CTDEBUG(arglist)
70 #endif
71 
/*
 * Write the _PTC ctrl register.  How it is written depends upon the _PTC
 * ACPI object's address-space type.
 */
static int
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_ptc_t *ptc_ctrl;
	uint64_t reg;
	int ret = 0;

	ptc_ctrl = CPU_ACPI_PTC_CTRL(handle);

	switch (ptc_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/*
		 * Read the current clock-modulation MSR because reserved
		 * bits must be preserved, compose the new value, and write
		 * it back.  The writable bits are 4:1.
		 * Bits 3:1 => On-Demand Clock Modulation Duty Cycle
		 * Bit  4   => On-Demand Clock Modulation Enable
		 * NOTE(review): ctrl is OR-ed in without shifting, so the
		 * _TSS control value is assumed to already be aligned with
		 * MSR bits 4:1 (an older comment here claimed a left shift
		 * by 1 that the code does not perform) -- confirm against
		 * the BIOS-exported T-state tables.
		 */
		reg = rdmsr(IA32_CLOCK_MODULATION_MSR);
		reg &= ~((uint64_t)0x1E);	/* clear bits 4:1 */
		reg |= ctrl;
		wrmsr(IA32_CLOCK_MODULATION_MSR, reg);
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Control register lives in I/O port space. */
		ret = cpu_acpi_write_port(ptc_ctrl->cr_address, ctrl,
		    ptc_ctrl->cr_width);
		break;

	default:
		/* Unsupported address-space type; report failure. */
		DTRACE_PROBE1(throttle_ctrl_unsupported_type, uint8_t,
		    ptc_ctrl->cr_addrspace_id);

		ret = -1;
	}

	DTRACE_PROBE1(throttle_ctrl_write, uint32_t, ctrl);
	DTRACE_PROBE1(throttle_ctrl_write_err, int, ret);

	return (ret);
}
118 
/*
 * Read the current throttling status from the _PTC status register into
 * *stat.  Returns 0 on success, -1 on failure.
 */
static int
read_status(cpu_acpi_handle_t handle, uint32_t *stat)
{
	cpu_acpi_ptc_t *ptc_stat;
	uint64_t reg;
	int ret = 0;

	ptc_stat = CPU_ACPI_PTC_STATUS(handle);

	switch (ptc_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/* Status is the duty-cycle/enable field, MSR bits 4:1. */
		reg = rdmsr(IA32_CLOCK_MODULATION_MSR);
		*stat = reg & 0x1E;
		ret = 0;
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Status register lives in I/O port space. */
		ret = cpu_acpi_read_port(ptc_stat->cr_address, stat,
		    ptc_stat->cr_width);
		break;

	default:
		DTRACE_PROBE1(throttle_status_unsupported_type, uint8_t,
		    ptc_stat->cr_addrspace_id);

		/*
		 * Return directly rather than falling through: *stat was
		 * never written, so the read probes below would expose an
		 * uninitialized value.
		 */
		return (-1);
	}

	DTRACE_PROBE1(throttle_status_read, uint32_t, *stat);
	DTRACE_PROBE1(throttle_status_read_err, int, ret);

	return (ret);
}
152 
153 /*
154  * Transition the current processor to the requested throttling state.
155  */
156 static int
cpupm_tstate_transition(xc_arg_t arg1,xc_arg_t arg2 __unused,xc_arg_t arg3 __unused)157 cpupm_tstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
158     xc_arg_t arg3 __unused)
159 {
160 	uint32_t req_state = arg1;
161 	cpupm_mach_state_t *mach_state =
162 	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
163 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
164 	cpu_acpi_tstate_t *req_tstate;
165 	uint32_t ctrl;
166 	uint32_t stat;
167 	int i;
168 
169 	req_tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
170 	req_tstate += req_state;
171 	DTRACE_PROBE1(throttle_transition, uint32_t,
172 	    CPU_ACPI_FREQPER(req_tstate));
173 
174 	/*
175 	 * Initiate the processor t-state change.
176 	 */
177 	ctrl = CPU_ACPI_TSTATE_CTRL(req_tstate);
178 	if (write_ctrl(handle, ctrl) != 0) {
179 		return (0);
180 	}
181 
182 	/*
183 	 * If status is zero, then transition is synchronous and
184 	 * no status value comparison is required.
185 	 */
186 	if (CPU_ACPI_TSTATE_STAT(req_tstate) == 0) {
187 		return (0);
188 	}
189 
190 	/* Wait until switch is complete, but bound the loop just in case. */
191 	for (i = CPU_ACPI_TSTATE_TRANSLAT(req_tstate) * 2; i >= 0;
192 	    i -= THROTTLE_LATENCY_WAIT) {
193 		if (read_status(handle, &stat) == 0 &&
194 		    CPU_ACPI_TSTATE_STAT(req_tstate) == stat)
195 			break;
196 		drv_usecwait(THROTTLE_LATENCY_WAIT);
197 	}
198 
199 	if (CPU_ACPI_TSTATE_STAT(req_tstate) != stat) {
200 		DTRACE_PROBE(throttle_transition_incomplete);
201 	}
202 	return (0);
203 }
204 
205 static void
cpupm_throttle(cpuset_t set,uint32_t throtl_lvl)206 cpupm_throttle(cpuset_t set,  uint32_t throtl_lvl)
207 {
208 	xc_arg_t xc_arg = (xc_arg_t)throtl_lvl;
209 
210 	/*
211 	 * If thread is already running on target CPU then just
212 	 * make the transition request. Otherwise, we'll need to
213 	 * make a cross-call.
214 	 */
215 	kpreempt_disable();
216 	if (CPU_IN_SET(set, CPU->cpu_id)) {
217 		cpupm_tstate_transition(xc_arg, 0, 0);
218 		CPUSET_DEL(set, CPU->cpu_id);
219 	}
220 	if (!CPUSET_ISNULL(set)) {
221 		xc_call(xc_arg, 0, 0,
222 		    CPUSET2BV(set), cpupm_tstate_transition);
223 	}
224 	kpreempt_enable();
225 }
226 
227 static int
cpupm_throttle_init(cpu_t * cp)228 cpupm_throttle_init(cpu_t *cp)
229 {
230 	cpupm_mach_state_t *mach_state =
231 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
232 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
233 	cpu_acpi_ptc_t *ptc_stat;
234 	int ret;
235 
236 	if ((ret = cpu_acpi_cache_tstate_data(handle)) != 0) {
237 		if (ret < 0)
238 			cmn_err(CE_NOTE,
239 			    "!Support for CPU throttling is being "
240 			    "disabled due to errors parsing ACPI T-state "
241 			    "objects exported by BIOS.");
242 		cpupm_throttle_fini(cp);
243 		return (THROTTLE_RET_INCOMPLETE_DATA);
244 	}
245 
246 	/*
247 	 * Check the address space used for transitions
248 	 */
249 	ptc_stat = CPU_ACPI_PTC_STATUS(handle);
250 	switch (ptc_stat->cr_addrspace_id) {
251 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
252 		CTDEBUG(("T-State transitions will use fixed hardware\n"));
253 		break;
254 	case ACPI_ADR_SPACE_SYSTEM_IO:
255 		CTDEBUG(("T-State transitions will use System IO\n"));
256 		break;
257 	default:
258 		cmn_err(CE_NOTE, "!_PTC configured for unsupported "
259 		    "address space type = %d.", ptc_stat->cr_addrspace_id);
260 		return (THROTTLE_RET_INCOMPLETE_DATA);
261 	}
262 
263 	cpupm_alloc_domains(cp, CPUPM_T_STATES);
264 
265 	return (THROTTLE_RET_SUCCESS);
266 }
267 
268 static void
cpupm_throttle_fini(cpu_t * cp)269 cpupm_throttle_fini(cpu_t *cp)
270 {
271 	cpupm_mach_state_t *mach_state =
272 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
273 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
274 
275 	cpupm_free_domains(&cpupm_tstate_domains);
276 	cpu_acpi_free_tstate_data(handle);
277 }
278 
279 static void
cpupm_throttle_stop(cpu_t * cp)280 cpupm_throttle_stop(cpu_t *cp)
281 {
282 	cpupm_mach_state_t *mach_state =
283 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
284 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
285 
286 	cpupm_remove_domains(cp, CPUPM_T_STATES, &cpupm_tstate_domains);
287 	cpu_acpi_free_tstate_data(handle);
288 }
289 
290 /*
291  * This routine reads the ACPI _TPC object. It's accessed as a callback
292  * by the cpu driver whenever a _TPC change notification is received.
293  */
294 static int
cpupm_throttle_get_max(processorid_t cpu_id)295 cpupm_throttle_get_max(processorid_t cpu_id)
296 {
297 	cpu_t			*cp = cpu[cpu_id];
298 	cpupm_mach_state_t	*mach_state =
299 	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
300 	cpu_acpi_handle_t	handle;
301 	int			throtl_level;
302 	int			max_throttle_lvl;
303 	uint_t			num_throtl;
304 
305 	if (mach_state == NULL) {
306 		return (-1);
307 	}
308 
309 	handle = mach_state->ms_acpi_handle;
310 	ASSERT(handle != NULL);
311 
312 	cpu_acpi_cache_tpc(handle);
313 	throtl_level = CPU_ACPI_TPC(handle);
314 
315 	num_throtl = CPU_ACPI_TSTATES_COUNT(handle);
316 
317 	max_throttle_lvl = num_throtl - 1;
318 	if ((throtl_level < 0) || (throtl_level > max_throttle_lvl)) {
319 		cmn_err(CE_NOTE, "!cpupm_throttle_get_max: CPU %d: "
320 		    "_TPC out of range %d", cp->cpu_id, throtl_level);
321 		throtl_level = 0;
322 	}
323 
324 	return (throtl_level);
325 }
326 
327 /*
328  * Take care of CPU throttling when _TPC notification arrives
329  */
330 void
cpupm_throttle_manage_notification(void * ctx)331 cpupm_throttle_manage_notification(void *ctx)
332 {
333 	cpu_t			*cp = ctx;
334 	processorid_t		cpu_id = cp->cpu_id;
335 	cpupm_mach_state_t	*mach_state =
336 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
337 	boolean_t		is_ready;
338 	int			new_level;
339 
340 	if (mach_state == NULL) {
341 		return;
342 	}
343 
344 	/*
345 	 * We currently refuse to power-manage if the CPU is not ready to
346 	 * take cross calls (cross calls fail silently if CPU is not ready
347 	 * for it).
348 	 *
349 	 * Additionally, for x86 platforms we cannot power-manage an instance,
350 	 * until it has been initialized.
351 	 */
352 	is_ready = (cp->cpu_flags & CPU_READY) && cpupm_throttle_ready(cp);
353 	if (!is_ready)
354 		return;
355 
356 	if (!(mach_state->ms_caps & CPUPM_T_STATES))
357 		return;
358 	ASSERT(mach_state->ms_tstate.cma_ops != NULL);
359 
360 	/*
361 	 * Get the new T-State support level
362 	 */
363 	new_level = cpupm_throttle_get_max(cpu_id);
364 
365 	cpupm_state_change(cp, new_level, CPUPM_T_STATES);
366 }
367