/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/*
 * List of the processor ACPI object types that are being used.
 */
typedef enum cpu_acpi_obj {
	PDC_OBJ = 0,
	PCT_OBJ,
	PSS_OBJ,
	PSD_OBJ,
	PPC_OBJ,
	PTC_OBJ,
	TSS_OBJ,
	TSD_OBJ,
	TPC_OBJ,
	CST_OBJ,
	CSD_OBJ,
} cpu_acpi_obj_t;

/*
 * Container to store object name.
 * Other attributes can be added in the future as necessary.
 */
typedef struct cpu_acpi_obj_attr {
	char *name;
} cpu_acpi_obj_attr_t;

/*
 * List of object attributes.
 * NOTE: Keep this list in the same order as cpu_acpi_obj_t.
 */
static cpu_acpi_obj_attr_t cpu_acpi_obj_attrs[] = {
	{"_PDC"},
	{"_PCT"},
	{"_PSS"},
	{"_PSD"},
	{"_PPC"},
	{"_PTC"},
	{"_TSS"},
	{"_TSD"},
	{"_TPC"},
	{"_CST"},
	{"_CSD"}
};
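
/*
 * The cpu_acpi_obj_t enumerators above are used directly as indices into
 * this table, so, for example (illustrative only):
 *
 *	cpu_acpi_obj_attrs[PSS_OBJ].name	is "_PSS"
 *	cpu_acpi_obj_attrs[CST_OBJ].name	is "_CST"
 *
 * This is why the two lists must stay in the same order.
 */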

/*
 * Cache the ACPI CPU control data objects.
 */
static int
cpu_acpi_cache_ctrl_regs(cpu_acpi_handle_t handle, cpu_acpi_obj_t objtype,
    cpu_acpi_ctrl_regs_t *regs)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	AML_RESOURCE_GENERIC_REGISTER *greg;
	int ret = -1;
	int i;

	/*
	 * Fetch the control registers (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (we just consider it a fixed hardware case).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			regs[0].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			regs[1].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	obj = abuf.Pointer;
	if (obj->Package.Count != 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in "
			    "%s package for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		if (greg->DescriptorType !=
		    ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package has format "
			    "error for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}
		if (greg->ResourceLength !=
		    ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package not right "
			    "size for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}
		if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
		    greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s contains unsupported "
			    "address space type %x for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    greg->AddressSpaceId,
			    handle->cs_id);
			goto out;
		}
	}

	/*
	 * Looks good!
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		regs[i].cr_addrspace_id = greg->AddressSpaceId;
		regs[i].cr_width = greg->BitWidth;
		regs[i].cr_offset = greg->BitOffset;
		regs[i].cr_asize = greg->AccessSize;
		regs[i].cr_address = greg->Address;
	}
	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
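
/*
 * A minimal sketch (illustrative only, not compiled here) of how a caller
 * can consume the two registers cached above into the array it passed in;
 * element 0 is conventionally the control register and element 1 the
 * status register:
 *
 *	uint32_t val;
 *
 *	if (regs[0].cr_addrspace_id == ACPI_ADR_SPACE_SYSTEM_IO)
 *		(void) cpu_acpi_read_port(regs[0].cr_address, &val,
 *		    regs[0].cr_width);
 */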

/*
 * Cache the ACPI _PCT data. The _PCT data defines the interface to use
 * when making power level transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
{
	cpu_acpi_pct_t *pct;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
	pct = &CPU_ACPI_PCT(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PCT_OBJ, pct)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _PTC data. The _PTC data defines the interface to use
 * when making T-state transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_ptc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ptc_t *ptc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PTC_CACHED);
	ptc = &CPU_ACPI_PTC(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PTC_OBJ, ptc)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PTC_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU state dependency data objects.
 */
static int
cpu_acpi_cache_state_dependencies(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_state_dependency_t *sd)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *pkg, *elements;
	int number;
	int ret = -1;

	if (objtype == CSD_OBJ) {
		number = 6;
	} else {
		number = 5;
	}
	/*
	 * Fetch the dependencies (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (it's up to the caller to determine how to handle non-existence).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	pkg = abuf.Pointer;

	if (((objtype != CSD_OBJ) && (pkg->Package.Count != 1)) ||
	    ((objtype == CSD_OBJ) && (pkg->Package.Count != 1) &&
	    (pkg->Package.Count != 2))) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s unsupported package count %d "
		    "for CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    pkg->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * For the C-state domain, we assume that C2 and C3 have the same
	 * domain information.
	 */
	if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
	    pkg->Package.Elements[0].Package.Count != number) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s package "
		    "for CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}
	elements = pkg->Package.Elements[0].Package.Elements;
	if (elements[0].Integer.Value != number ||
	    elements[1].Integer.Value != 0) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected %s revision for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	sd->sd_entries = elements[0].Integer.Value;
	sd->sd_revision = elements[1].Integer.Value;
	sd->sd_domain = elements[2].Integer.Value;
	sd->sd_type = elements[3].Integer.Value;
	sd->sd_num = elements[4].Integer.Value;
	if (objtype == CSD_OBJ) {
		sd->sd_index = elements[5].Integer.Value;
	}

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
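
/*
 * For reference, the dependency package parsed above has the ACPI-defined
 * layout below (the _CSD variant carries one extra Index element, which
 * is why "number" is 6 for _CSD and 5 otherwise):
 *
 *	elements[0]	NumEntries	(5, or 6 for _CSD)
 *	elements[1]	Revision	(0)
 *	elements[2]	Domain
 *	elements[3]	CoordType
 *	elements[4]	NumProcessors
 *	elements[5]	Index		(_CSD only)
 *
 * These map to sd_entries, sd_revision, sd_domain, sd_type, sd_num and
 * sd_index, respectively.
 */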

/*
 * Cache the ACPI _PSD data. The _PSD data defines P-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
{
	cpu_acpi_psd_t *psd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
	psd = &CPU_ACPI_PSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, PSD_OBJ, psd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _TSD data. The _TSD data defines T-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_tsd(cpu_acpi_handle_t handle)
{
	cpu_acpi_tsd_t *tsd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSD_CACHED);
	tsd = &CPU_ACPI_TSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, TSD_OBJ, tsd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _CSD data. The _CSD data defines C-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_csd(cpu_acpi_handle_t handle)
{
	cpu_acpi_csd_t *csd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CSD_CACHED);
	csd = &CPU_ACPI_CSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, CSD_OBJ, csd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CSD_CACHED);
	return (ret);
}

static void
cpu_acpi_cache_pstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_pstate_t *pstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_PSTATES_COUNT(handle) = cnt;
	CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
	    KM_SLEEP);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)pstate;
		for (j = 0; j < CPU_ACPI_PSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		pstate++;
		cnt--;
	}
}
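
/*
 * The per-entry copy above assumes that cpu_acpi_pstate_t begins with
 * CPU_ACPI_PSS_CNT consecutive uint32_t fields mirroring one _PSS entry
 * in the ACPI-defined order (a sketch, for reference):
 *
 *	q[0]	CoreFrequency (MHz)
 *	q[1]	Power (mW)
 *	q[2]	TransitionLatency (us)
 *	q[3]	BusMasterLatency (us)
 *	q[4]	Control
 *	q[5]	Status
 */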

static void
cpu_acpi_cache_tstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_tstate_t *tstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_TSTATES_COUNT(handle) = cnt;
	CPU_ACPI_TSTATES(handle) = kmem_zalloc(CPU_ACPI_TSTATES_SIZE(cnt),
	    KM_SLEEP);
	tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)tstate;
		for (j = 0; j < CPU_ACPI_TSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		tstate++;
		cnt--;
	}
}
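
/*
 * Likewise, this assumes cpu_acpi_tstate_t begins with CPU_ACPI_TSS_CNT
 * consecutive uint32_t fields mirroring one _TSS entry in the
 * ACPI-defined order (a sketch, for reference):
 *
 *	q[0]	Percent (of maximum frequency)
 *	q[1]	Power (mW)
 *	q[2]	TransitionLatency (us)
 *	q[3]	Control
 *	q[4]	Status
 */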

/*
 * Cache the _PSS or _TSS data.
 */
static int
cpu_acpi_cache_supported_states(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, int fcnt)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj, *q, *l;
	boolean_t eot = B_FALSE;
	int ret = -1;
	int cnt;
	int i, j;

	/*
	 * Fetch the state data (if present) for the CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}
	obj = abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = 0;
	for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
		    obj->Package.Elements[i].Package.Count != fcnt) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in "
			    "%s package for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		q = obj->Package.Elements[i].Package.Elements;
		for (j = 0; j < fcnt; j++) {
			if (q[j].Type != ACPI_TYPE_INTEGER) {
				cmn_err(CE_NOTE, "!cpu_acpi: %s element "
				    "invalid (type) for CPU %d.",
				    cpu_acpi_obj_attrs[objtype].name,
				    handle->cs_id);
				goto out;
			}
		}

		/*
		 * Ignore duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		/*
		 * Some supported state tables are larger than required
		 * and unused elements are filled with patterns
		 * of 0xff.  Simply check here for frequency = 0xffff
		 * and stop counting if found.
		 */
		if (q[0].Integer.Value == 0xffff) {
			eot = B_TRUE;
			continue;
		}

		/*
		 * We should never find a valid entry after we've hit
		 * the end-of-table entry.
		 */
		if (eot) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s "
			    "package after eot for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		/*
		 * States must be defined in order from highest to lowest.
		 */
		if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package state "
			    "definitions out of order for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		/*
		 * This entry passes.
		 */
		cnt++;
	}
	if (cnt == 0)
		goto out;

	/*
	 * Yes, fill in the structure.
	 */
	ASSERT(objtype == PSS_OBJ || objtype == TSS_OBJ);
	if (objtype == PSS_OBJ)
		cpu_acpi_cache_pstate(handle, obj, cnt);
	else
		cpu_acpi_cache_tstate(handle, obj, cnt);

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the _PSS data. The _PSS data defines the different power levels
 * supported by the CPU and the attributes associated with each power level
 * (i.e., frequency, voltage, etc.). The power levels are numbered from
 * highest to lowest. That is, the highest power level is _PSS entry 0
 * and the lowest power level is the last _PSS entry.
 */
static int
cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, PSS_OBJ,
	    CPU_ACPI_PSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
	return (ret);
}

/*
 * Cache the _TSS data. The _TSS data defines the different frequency
 * throttle levels supported by the CPU and the attributes associated with
 * each throttle level (i.e., frequency throttle percentage, voltage, etc.).
 * The throttle levels are numbered from highest to lowest.
 */
static int
cpu_acpi_cache_tstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, TSS_OBJ,
	    CPU_ACPI_TSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSS_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU present capabilities data objects.
 */
static int
cpu_acpi_cache_present_capabilities(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_present_capabilities_t *pc)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	int ret = -1;

	/*
	 * Fetch the present capabilities object (if present) for the
	 * CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObject(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf);
	if (ACPI_FAILURE(astatus) && astatus != AE_NOT_FOUND) {
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s "
		    "package for CPU %d.", astatus,
		    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
		goto out;
	}
	if (astatus == AE_NOT_FOUND || abuf.Length == 0) {
		*pc = 0;
		return (1);
	}

	obj = (ACPI_OBJECT *)abuf.Pointer;
	*pc = obj->Integer.Value;

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the _PPC data. The _PPC simply contains an integer value which
 * represents the highest power level that a CPU should transition to.
 * That is, it's an index into the array of _PSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ppc_t *ppc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
	ppc = &CPU_ACPI_PPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, PPC_OBJ, ppc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
}

/*
 * Cache the _TPC data. The _TPC simply contains an integer value which
 * represents the throttle level that a CPU should transition to.
 * That is, it's an index into the array of _TSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_tpc(cpu_acpi_handle_t handle)
{
	cpu_acpi_tpc_t *tpc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TPC_CACHED);
	tpc = &CPU_ACPI_TPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, TPC_OBJ, tpc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TPC_CACHED);
}

int
cpu_acpi_verify_cstate(cpu_acpi_cstate_t *cstate)
{
	uint32_t addrspaceid = cstate->cs_addrspace_id;

	if ((addrspaceid != ACPI_ADR_SPACE_FIXED_HARDWARE) &&
	    (addrspaceid != ACPI_ADR_SPACE_SYSTEM_IO)) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST unsupported address space "
		    "id %d for C%d.\n", addrspaceid, cstate->cs_type);
		return (1);
	}
	return (0);
}

int
cpu_acpi_cache_cst(cpu_acpi_handle_t handle)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	ACPI_INTEGER cnt, old_cnt;
	cpu_acpi_cstate_t *cstate, *p;
	size_t alloc_size;
	int i, count;
	int ret = 1;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CST_CACHED);

	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;

	/*
	 * Fetch the C-state data (if present) for the CPU node.
	 */
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle, "_CST",
	    NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, CST_OBJ, int, astatus);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating _CST package "
		    "for CPU %d.", astatus, handle->cs_id);
		goto out;
	}
	obj = (ACPI_OBJECT *)abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST unsupported package "
		    "count %d for CPU %d.", obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = obj->Package.Elements[0].Integer.Value;
	if (cnt < 1 || cnt != obj->Package.Count - 1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid element "
		    "count %d != Package count %d for CPU %d",
		    (int)cnt, (int)obj->Package.Count - 1, handle->cs_id);
		goto out;
	}

	/*
	 * Reuse the old buffer if the number of C-states is the same;
	 * otherwise free it so a correctly sized one can be allocated.
	 */
	if (CPU_ACPI_CSTATES(handle) &&
	    (old_cnt = CPU_ACPI_CSTATES_COUNT(handle)) != cnt) {
		kmem_free(CPU_ACPI_CSTATES(handle),
		    CPU_ACPI_CSTATES_SIZE(old_cnt));
		CPU_ACPI_CSTATES(handle) = NULL;
	}

	CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)cnt;
	alloc_size = CPU_ACPI_CSTATES_SIZE(cnt);
	if (CPU_ACPI_CSTATES(handle) == NULL)
		CPU_ACPI_CSTATES(handle) = kmem_zalloc(alloc_size, KM_SLEEP);
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	p = cstate;

	for (i = 1, count = 1; i <= cnt; i++) {
		ACPI_OBJECT *pkg;
		AML_RESOURCE_GENERIC_REGISTER *reg;
		ACPI_OBJECT *element;

		pkg = &(obj->Package.Elements[i]);
		reg = (AML_RESOURCE_GENERIC_REGISTER *)
		    pkg->Package.Elements[0].Buffer.Pointer;
		cstate->cs_addrspace_id = reg->AddressSpaceId;
		cstate->cs_address = reg->Address;
		element = &(pkg->Package.Elements[1]);
		cstate->cs_type = element->Integer.Value;
		element = &(pkg->Package.Elements[2]);
		cstate->cs_latency = element->Integer.Value;
		element = &(pkg->Package.Elements[3]);
		cstate->cs_power = element->Integer.Value;

		if (cpu_acpi_verify_cstate(cstate)) {
			/*
			 * Ignore this entry if it's not valid.
			 */
			continue;
		}
		if (cstate == p) {
			cstate++;
		} else if (p->cs_type == cstate->cs_type) {
			/*
			 * If there are duplicate entries, keep the last one.
			 * This handles:
			 * 1) buggy BIOSes that report completely duplicated
			 *    entries;
			 * 2) the ACPI spec allowing the same C-state type
			 *    with different power and latency values; we
			 *    keep the one with more power saving.
			 */
			(void) memcpy(p, cstate, sizeof (cpu_acpi_cstate_t));
		} else {
			/*
			 * We got a valid entry; cache it in the next
			 * cstate slot.
			 */
			p = cstate++;
			count++;
		}
	}

	if (count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid count %d < 2 for "
		    "CPU %d", count, handle->cs_id);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		goto out;
	}
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate[0].cs_type != CPU_ACPI_C1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST first element type not "
		    "C1: %d for CPU %d", (int)cstate->cs_type, handle->cs_id);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		goto out;
	}

	if (count != cnt) {
		void	*orig = CPU_ACPI_CSTATES(handle);

		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)count;
		CPU_ACPI_CSTATES(handle) = kmem_zalloc(
		    CPU_ACPI_CSTATES_SIZE(count), KM_SLEEP);
		(void) memcpy(CPU_ACPI_CSTATES(handle), orig,
		    CPU_ACPI_CSTATES_SIZE(count));
		kmem_free(orig, alloc_size);
	}

	CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CST_CACHED);

	ret = 0;

out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}
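
/*
 * For reference, the _CST package parsed above has the ACPI-defined shape
 * (a sketch):
 *
 *	element[0]		Count of C-state sub-packages
 *	element[1..Count]	each a Package of:
 *					Register (Generic Register buffer)
 *					Type (C1, C2, C3, ...)
 *					Latency (us)
 *					Power (mW)
 *
 * which is why the code above requires element[0] to match
 * Package.Count - 1 and the first retained entry to be of type C1.
 */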

/*
 * Cache the _PCT, _PSS, _PSD and _PPC data.
 */
int
cpu_acpi_cache_pstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_pct(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PCT_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_pstates(handle) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PSS_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_psd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PSD_OBJ);
		return (-1);
	}

	cpu_acpi_cache_ppc(handle);

	return (0);
}

void
cpu_acpi_free_pstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_PSTATES(handle)) {
			kmem_free(CPU_ACPI_PSTATES(handle),
			    CPU_ACPI_PSTATES_SIZE(
			    CPU_ACPI_PSTATES_COUNT(handle)));
			CPU_ACPI_PSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _PTC, _TSS, _TSD and _TPC data.
 */
int
cpu_acpi_cache_tstate_data(cpu_acpi_handle_t handle)
{
	int ret;

	if (cpu_acpi_cache_ptc(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PTC_OBJ);
		return (-1);
	}

	if ((ret = cpu_acpi_cache_tstates(handle)) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, TSS_OBJ);
		return (ret);
	}

	if (cpu_acpi_cache_tsd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, TSD_OBJ);
		return (-1);
	}

	cpu_acpi_cache_tpc(handle);

	return (0);
}

void
cpu_acpi_free_tstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_TSTATES(handle)) {
			kmem_free(CPU_ACPI_TSTATES(handle),
			    CPU_ACPI_TSTATES_SIZE(
			    CPU_ACPI_TSTATES_COUNT(handle)));
			CPU_ACPI_TSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _CST data.
 */
int
cpu_acpi_cache_cstate_data(cpu_acpi_handle_t handle)
{
	int ret;

	if ((ret = cpu_acpi_cache_cst(handle)) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, CST_OBJ);
		return (ret);
	}

	if (cpu_acpi_cache_csd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, CSD_OBJ);
		return (-1);
	}

	return (0);
}

void
cpu_acpi_free_cstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_CSTATES(handle)) {
			kmem_free(CPU_ACPI_CSTATES(handle),
			    CPU_ACPI_CSTATES_SIZE(
			    CPU_ACPI_CSTATES_COUNT(handle)));
			CPU_ACPI_CSTATES(handle) = NULL;
		}
	}
}

/*
 * Register a handler for processor change notifications.
 */
void
cpu_acpi_install_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler, void *ctx)
{
	if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler, ctx)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to register "
		    "notify handler for CPU %d.", handle->cs_id);
}

/*
 * Remove a handler for processor change notifications.
 */
void
cpu_acpi_remove_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler)
{
	if (ACPI_FAILURE(AcpiRemoveNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to remove "
		    "notify handler for CPU %d.", handle->cs_id);
}

/*
 * Write _PDC.
 */
int
cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
    uint32_t *capabilities)
{
	ACPI_STATUS astatus;
	ACPI_OBJECT obj;
	ACPI_OBJECT_LIST list = { 1, &obj };
	uint32_t *buffer;
	uint32_t *bufptr;
	uint32_t bufsize;
	int i;
	int ret = 0;

	bufsize = (count + 2) * sizeof (uint32_t);
	buffer = kmem_zalloc(bufsize, KM_SLEEP);
	buffer[0] = revision;
	buffer[1] = count;
	bufptr = &buffer[2];
	for (i = 0; i < count; i++)
		*bufptr++ = *capabilities++;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = bufsize;
	obj.Buffer.Pointer = (void *)buffer;

	/*
	 * Evaluate _PDC (if present) for the CPU node, passing it the
	 * capability buffer built above.
	 */
	astatus = AcpiEvaluateObject(handle->cs_handle, "_PDC", &list, NULL);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, PDC_OBJ, int, astatus);
			ret = 1;
		} else {
			cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating _PDC "
			    "package for CPU %d.", astatus, handle->cs_id);
			ret = -1;
		}
	}

	kmem_free(buffer, bufsize);
	return (ret);
}
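
/*
 * A minimal usage sketch (illustrative only; the revision and capability
 * bits shown are placeholders, not values defined by this file):
 *
 *	uint32_t caps[1] = { 0x1 };
 *
 *	(void) cpu_acpi_write_pdc(handle, 1, 1, caps);
 *
 * The buffer handed to _PDC is laid out as { revision, count,
 * capabilities[0 .. count-1] }, matching the construction above.
 */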

/*
 * Write to a system IO port.
 */
int
cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
		cmn_err(CE_NOTE, "!cpu_acpi: error writing system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Read from a system IO port.
 */
int
cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
		cmn_err(CE_NOTE, "!cpu_acpi: error reading system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}
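
/*
 * A minimal sketch (illustrative only) of pairing these helpers with the
 * control register cached from _PCT, when that register lives in system
 * IO space:
 *
 *	cpu_acpi_pct_t *pct = &CPU_ACPI_PCT(handle)[0];
 *	uint32_t val;
 *
 *	if (pct->cr_addrspace_id == ACPI_ADR_SPACE_SYSTEM_IO)
 *		(void) cpu_acpi_read_port(pct->cr_address, &val,
 *		    pct->cr_width);
 */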

/*
 * Return supported frequencies.
 */
uint_t
cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
{
	cpu_acpi_pstate_t *pstate;
	int *hspeeds;
	uint_t nspeeds;
	int i;

	nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
	for (i = 0; i < nspeeds; i++) {
		hspeeds[i] = CPU_ACPI_FREQ(pstate);
		pstate++;
	}
	*speeds = hspeeds;
	return (nspeeds);
}
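
/*
 * A minimal caller sketch (illustrative only):
 *
 *	int *speeds;
 *	uint_t nspeeds;
 *
 *	nspeeds = cpu_acpi_get_speeds(handle, &speeds);
 *	...
 *	cpu_acpi_free_speeds(speeds, nspeeds);
 *
 * The caller owns the returned array and must release it with
 * cpu_acpi_free_speeds().
 */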

/*
 * Free resources allocated by cpu_acpi_get_speeds().
 */
void
cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
{
	kmem_free(speeds, nspeeds * sizeof (int));
}

uint_t
cpu_acpi_get_max_cstates(cpu_acpi_handle_t handle)
{
	if (CPU_ACPI_CSTATES(handle))
		return (CPU_ACPI_CSTATES_COUNT(handle));
	else
		return (1);
}

void
cpu_acpi_set_register(uint32_t bitreg, uint32_t value)
{
	(void) AcpiWriteBitRegister(bitreg, value);
}

void
cpu_acpi_get_register(uint32_t bitreg, uint32_t *value)
{
	(void) AcpiReadBitRegister(bitreg, value);
}

/*
 * Map the CPU to an ACPI handle and allocate the per-CPU ACPI state.
 */
cpu_acpi_handle_t
cpu_acpi_init(cpu_t *cp)
{
	cpu_acpi_handle_t handle;

	handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);

	if (ACPI_FAILURE(acpica_get_handle_cpu(cp->cpu_id,
	    &handle->cs_handle))) {
		kmem_free(handle, sizeof (cpu_acpi_state_t));
		return (NULL);
	}
	handle->cs_id = cp->cpu_id;
	return (handle);
}
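
/*
 * A minimal lifecycle sketch (illustrative only) for a CPU power driver
 * using this interface:
 *
 *	cpu_acpi_handle_t handle = cpu_acpi_init(cp);
 *
 *	if (handle != NULL &&
 *	    cpu_acpi_cache_pstate_data(handle) == 0) {
 *		... use CPU_ACPI_PSTATES(handle) ...
 *		cpu_acpi_free_pstate_data(handle);
 *	}
 *	cpu_acpi_fini(handle);
 */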

/*
 * Free any resources.
 */
void
cpu_acpi_fini(cpu_acpi_handle_t handle)
{
	if (handle)
		kmem_free(handle, sizeof (cpu_acpi_state_t));
}