xref: /illumos-gate/usr/src/uts/intel/io/acpica/osl.c (revision 35786f68)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2018 Joyent, Inc.
26  */
27 /*
28  * Copyright (c) 2009-2010, Intel Corporation.
29  * All rights reserved.
30  */
31 /*
32  * ACPI CA OSL for Solaris x86
33  */
34 
35 #include <sys/types.h>
36 #include <sys/kmem.h>
37 #include <sys/psm.h>
38 #include <sys/pci_cfgspace.h>
39 #include <sys/apic.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/pci.h>
44 #include <sys/kobj.h>
45 #include <sys/taskq.h>
46 #include <sys/strlog.h>
47 #include <sys/x86_archext.h>
48 #include <sys/note.h>
49 #include <sys/promif.h>
50 
51 #include <sys/acpi/accommon.h>
52 #include <sys/acpica.h>
53 
54 #define	MAX_DAT_FILE_SIZE	(64*1024)
55 
56 /* local functions */
57 static int CompressEisaID(char *np);
58 
59 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
60 static int acpica_query_bbn_problem(void);
61 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
62 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
63 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
64 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
65 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
66 
67 /*
68  * Event queue vars
69  */
70 int acpica_eventq_init = 0;
71 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
72 
73 /*
74  * Priorities relative to minclsyspri that each taskq
75  * runs at; OSL_NOTIFY_HANDLER needs to run at a higher
76  * priority than OSL_GPE_HANDLER.  There's an implicit
77  * assumption that no priority here results in exceeding
78  * maxclsyspri.
79  * Note: these initializations need to match the order of
80  * ACPI_EXECUTE_TYPE.
81  */
82 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
83 	0,	/* OSL_GLOBAL_LOCK_HANDLER */
84 	2,	/* OSL_NOTIFY_HANDLER */
85 	0,	/* OSL_GPE_HANDLER */
86 	0,	/* OSL_DEBUGGER_THREAD */
87 	0,	/* OSL_EC_POLL_HANDLER */
88 	0	/* OSL_EC_BURST_HANDLER */
89 };
90 
91 /*
92  * Note, if you change this path, you need to update
93  * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
94  */
95 static char *acpi_table_path = "/boot/acpi/tables/";
96 
97 /* non-zero while scan_d2a_map() is working */
98 static int scanning_d2a_map = 0;
99 static int d2a_done = 0;
100 
101 /* features supported by ACPICA and ACPI device configuration. */
102 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
103 static uint64_t acpica_devcfg_features = 0;
104 
105 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
106 int acpica_use_safe_delay = 0;
107 
108 /* CPU mapping data */
109 struct cpu_map_item {
110 	processorid_t	cpu_id;
111 	UINT32		proc_id;
112 	UINT32		apic_id;
113 	ACPI_HANDLE	obj;
114 };
115 
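/* cpu_map_lock protects the CPU map array and its counters below. */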
116 kmutex_t cpu_map_lock;
117 static struct cpu_map_item **cpu_map = NULL;
118 static int cpu_map_count_max = 0;
119 static int cpu_map_count = 0;
120 static int cpu_map_built = 0;
121 
122 /*
123  * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
124  * This flag is used to check for uppc-only systems by detecting whether
125  * acpica_map_cpu() has been called or not.
126  */
127 static int cpu_map_called = 0;
128 
129 static int acpi_has_broken_bbn = -1;
130 
131 /* buffer for AcpiOsVprintf() */
132 #define	ACPI_OSL_PR_BUFLEN	1024
133 static char *acpi_osl_pr_buffer = NULL;
134 static int acpi_osl_pr_buflen;
135 
136 #define	D2ADEBUG
137 
138 /*
139  * Destroy the taskqs created for ACPI event handling.
140  */
141 static void
142 discard_event_queues()
143 {
144 	int	i;
145 
146 	/*
147 	 * destroy event queues
148 	 */
149 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
150 		if (osl_eventq[i])
151 			ddi_taskq_destroy(osl_eventq[i]);
152 	}
153 }
154 
155 
156 /*
157  * Create one single-threaded taskq per ACPI_EXECUTE_TYPE.
158  */
159 static ACPI_STATUS
160 init_event_queues()
161 {
162 	char	namebuf[32];
163 	int	i, error = 0;
164 
165 	/*
166 	 * Initialize event queues
167 	 */
168 
169 	/* Always allocate only 1 thread per queue to force FIFO execution */
170 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
171 		(void) snprintf(namebuf, 32, "ACPI%d", i);
172 		osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
173 		    osl_eventq_pri_delta[i] + minclsyspri, 0);
174 		if (osl_eventq[i] == NULL)
175 			error++;
176 	}
177 
178 	if (error != 0) {
179 		discard_event_queues();
180 #ifdef	DEBUG
181 		cmn_err(CE_WARN, "!acpica: could not initialize event queues");
182 #endif
183 		return (AE_ERROR);
184 	}
185 
186 	acpica_eventq_init = 1;
187 	return (AE_OK);
188 }
189 
190 /*
191  * One-time initialization of OSL layer
192  */
193 ACPI_STATUS
194 AcpiOsInitialize(void)
195 {
196 	/*
197 	 * Allocate buffer for AcpiOsVprintf() here to avoid
198 	 * kmem_alloc()/kmem_free() at high PIL
199 	 */
200 	acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
201 	if (acpi_osl_pr_buffer != NULL)
202 		acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
203 
204 	return (AE_OK);
205 }
206 
207 /*
208  * One-time shut-down of OSL layer
209  */
210 ACPI_STATUS
211 AcpiOsTerminate(void)
212 {
213 
214 	if (acpi_osl_pr_buffer != NULL)
215 		kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
216 
217 	discard_event_queues();
218 	return (AE_OK);
219 }
220 
221 
222 ACPI_PHYSICAL_ADDRESS
223 AcpiOsGetRootPointer()
224 {
225 	ACPI_PHYSICAL_ADDRESS Address;
226 
227 	/*
228 	 * For EFI firmware, the root pointer is defined in EFI systab.
229 	 * The boot code processes the table and puts the physical address
230 	 * in the acpi-root-tab property.
231 	 */
232 	Address = ddi_prop_get_int64(DDI_DEV_T_ANY, ddi_root_node(),
233 	    DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
234 
235 	if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
236 		Address = NULL;
237 
238 	return (Address);
239 }
240 
241 /*ARGSUSED*/
242 ACPI_STATUS
243 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
244     ACPI_STRING *NewVal)
245 {
246 
247 	*NewVal = 0;
248 	return (AE_OK);
249 }
250 
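/*
 * Copy at most len bytes of src into dest and always NUL-terminate;
 * dest must have room for len + 1 bytes.
 */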
251 static void
252 acpica_strncpy(char *dest, const char *src, int len)
253 {
254 
255 	/*LINTED*/
256 	while ((*dest++ = *src++) && (--len > 0))
257 		/* copy the string */;
258 	*dest = '\0';
259 }
260 
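/*
 * Allow an ACPI table to be replaced by a file under /boot/acpi/tables/,
 * named <signature>_<oemid>_<oemtableid>.dat.
 */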
261 ACPI_STATUS
262 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
263     ACPI_TABLE_HEADER **NewTable)
264 {
265 	char signature[5];
266 	char oemid[7];
267 	char oemtableid[9];
268 	struct _buf *file;
269 	char *buf1, *buf2;
270 	int count;
271 	char acpi_table_loc[128];
272 
273 	acpica_strncpy(signature, ExistingTable->Signature, 4);
274 	acpica_strncpy(oemid, ExistingTable->OemId, 6);
275 	acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
276 
277 	/* File name format is "signature_oemid_oemtableid.dat" */
278 	(void) strcpy(acpi_table_loc, acpi_table_path);
279 	(void) strcat(acpi_table_loc, signature); /* for example, DSDT */
280 	(void) strcat(acpi_table_loc, "_");
281 	(void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
282 	(void) strcat(acpi_table_loc, "_");
283 	(void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
284 	(void) strcat(acpi_table_loc, ".dat");
285 
286 	file = kobj_open_file(acpi_table_loc);
287 	if (file == (struct _buf *)-1) {
288 		*NewTable = 0;
289 		return (AE_OK);
290 	} else {
291 		buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
292 		count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
293 		if (count >= MAX_DAT_FILE_SIZE) {
294 			cmn_err(CE_WARN, "!acpica: table %s file size too big",
295 			    acpi_table_loc);
296 			*NewTable = 0;
297 		} else {
298 			buf2 = (char *)kmem_alloc(count, KM_SLEEP);
299 			(void) memcpy(buf2, buf1, count);
300 			*NewTable = (ACPI_TABLE_HEADER *)buf2;
301 			cmn_err(CE_NOTE, "!acpica: replacing table: %s",
302 			    acpi_table_loc);
303 		}
304 	}
305 	kobj_close_file(file);
306 	kmem_free(buf1, MAX_DAT_FILE_SIZE);
307 
308 	return (AE_OK);
309 }
310 
311 ACPI_STATUS
312 AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable,
313     ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength)
314 {
315 	return (AE_SUPPORT);
316 }
317 
318 /*
319  * ACPI semaphore implementation
320  */
321 typedef struct {
322 	kmutex_t	mutex;
323 	kcondvar_t	cv;
324 	uint32_t	available;
325 	uint32_t	initial;
326 	uint32_t	maximum;
327 } acpi_sema_t;
328 
329 /*
330  * Initialize a counting semaphore with 'count' of 'max' units available.
331  */
332 void
333 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
334 {
335 	mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
336 	cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
337 	/* no need to enter mutex here at creation */
338 	sp->available = count;
339 	sp->initial = count;
340 	sp->maximum = max;
341 }
342 
343 /*
344  * Destroy a semaphore created with acpi_sema_init().
345  */
346 void
347 acpi_sema_destroy(acpi_sema_t *sp)
348 {
349 
350 	cv_destroy(&sp->cv);
351 	mutex_destroy(&sp->mutex);
352 }
353 
354 /*
355  * Take 'count' units, waiting up to wait_time ms; returns AE_TIME on timeout.
356  */
357 ACPI_STATUS
358 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
359 {
360 	ACPI_STATUS rv = AE_OK;
361 	clock_t deadline;
362 
363 	mutex_enter(&sp->mutex);
364 
365 	if (sp->available >= count) {
366 		/*
367 		 * Enough units available, no blocking
368 		 */
369 		sp->available -= count;
370 		mutex_exit(&sp->mutex);
371 		return (rv);
372 	} else if (wait_time == 0) {
373 		/*
374 		 * Not enough units available and timeout
375 		 * specifies no blocking
376 		 */
377 		rv = AE_TIME;
378 		mutex_exit(&sp->mutex);
379 		return (rv);
380 	}
381 
382 	/*
383 	 * Not enough units available and timeout specifies waiting
384 	 */
385 	if (wait_time != ACPI_WAIT_FOREVER)
386 		deadline = ddi_get_lbolt() +
387 		    (clock_t)drv_usectohz(wait_time * 1000);
388 
389 	do {
390 		if (wait_time == ACPI_WAIT_FOREVER)
391 			cv_wait(&sp->cv, &sp->mutex);
392 		else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
393 			rv = AE_TIME;
394 			break;
395 		}
396 	} while (sp->available < count);
397 
398 	/* if we dropped out of the wait with AE_OK, we got the units */
399 	if (rv == AE_OK)
400 		sp->available -= count;
401 
402 	mutex_exit(&sp->mutex);
403 	return (rv);
404 }
405 
406 /*
407  * Return 'count' units to the semaphore and wake any waiters.
408  */
409 void
410 acpi_sema_v(acpi_sema_t *sp, unsigned count)
411 {
412 	mutex_enter(&sp->mutex);
413 	sp->available += count;
414 	cv_broadcast(&sp->cv);
415 	mutex_exit(&sp->mutex);
416 }
417 
418 
419 ACPI_STATUS
420 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
421     ACPI_HANDLE *OutHandle)
422 {
423 	acpi_sema_t *sp;
424 
425 	if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
426 		return (AE_BAD_PARAMETER);
427 
428 	sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
429 	acpi_sema_init(sp, MaxUnits, InitialUnits);
430 	*OutHandle = (ACPI_HANDLE)sp;
431 	return (AE_OK);
432 }
433 
434 
435 ACPI_STATUS
436 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
437 {
438 
439 	if (Handle == NULL)
440 		return (AE_BAD_PARAMETER);
441 
442 	acpi_sema_destroy((acpi_sema_t *)Handle);
443 	kmem_free((void *)Handle, sizeof (acpi_sema_t));
444 	return (AE_OK);
445 }
446 
447 ACPI_STATUS
448 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
449 {
450 
451 	if ((Handle == NULL) || (Units < 1))
452 		return (AE_BAD_PARAMETER);
453 
454 	return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
455 }
456 
457 ACPI_STATUS
458 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
459 {
460 
461 	if ((Handle == NULL) || (Units < 1))
462 		return (AE_BAD_PARAMETER);
463 
464 	acpi_sema_v((acpi_sema_t *)Handle, Units);
465 	return (AE_OK);
466 }
467 
468 ACPI_STATUS
469 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
470 {
471 	kmutex_t *mp;
472 
473 	if (OutHandle == NULL)
474 		return (AE_BAD_PARAMETER);
475 
476 	mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
477 	mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
478 	*OutHandle = (ACPI_HANDLE)mp;
479 	return (AE_OK);
480 }
481 
482 void
483 AcpiOsDeleteLock(ACPI_HANDLE Handle)
484 {
485 
486 	if (Handle == NULL)
487 		return;
488 
489 	mutex_destroy((kmutex_t *)Handle);
490 	kmem_free((void *)Handle, sizeof (kmutex_t));
491 }
492 
493 ACPI_CPU_FLAGS
494 AcpiOsAcquireLock(ACPI_HANDLE Handle)
495 {
496 
497 
498 	if (Handle == NULL)
499 		return (AE_BAD_PARAMETER);
500 
501 	if (curthread == CPU->cpu_idle_thread) {
502 		while (!mutex_tryenter((kmutex_t *)Handle))
503 			/* spin */;
504 	} else
505 		mutex_enter((kmutex_t *)Handle);
506 	return (AE_OK);
507 }
508 
509 void
510 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
511 {
512 	_NOTE(ARGUNUSED(Flags))
513 
514 	mutex_exit((kmutex_t *)Handle);
515 }
516 
517 
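/*
 * Each allocation is prefixed with its total size so that AcpiOsFree()
 * can recover the length that kmem_free() requires.
 */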
518 void *
519 AcpiOsAllocate(ACPI_SIZE Size)
520 {
521 	ACPI_SIZE *tmp_ptr;
522 
523 	Size += sizeof (Size);
524 	tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
525 	*tmp_ptr++ = Size;
526 	return (tmp_ptr);
527 }
528 
529 void
530 AcpiOsFree(void *Memory)
531 {
532 	ACPI_SIZE	size, *tmp_ptr;
533 
534 	tmp_ptr = (ACPI_SIZE *)Memory;
535 	tmp_ptr -= 1;
536 	size = *tmp_ptr;
537 	kmem_free(tmp_ptr, size);
538 }
539 
540 static int napics_found;	/* number of ioapic addresses in array */
541 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
542 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
543 static void *dummy_ioapicadr;
544 
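/*
 * Record the physical addresses of all I/O APICs listed in the MADT so
 * that AcpiOsMapMemory() can redirect mappings of those pages to a dummy
 * page, keeping AML accesses away from the real ioapic registers.
 */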
545 void
546 acpica_find_ioapics(void)
547 {
548 	int			madt_seen, madt_size;
549 	ACPI_SUBTABLE_HEADER		*ap;
550 	ACPI_MADT_IO_APIC		*mia;
551 
552 	if (acpi_mapic_dtp != NULL)
553 		return;	/* already parsed table */
554 	if (AcpiGetTable(ACPI_SIG_MADT, 1,
555 	    (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
556 		return;
557 
558 	napics_found = 0;
559 
560 	/*
561 	 * Search the MADT for ioapics
562 	 */
563 	ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
564 	madt_size = acpi_mapic_dtp->Header.Length;
565 	madt_seen = sizeof (*acpi_mapic_dtp);
566 
567 	while (madt_seen < madt_size) {
568 
569 		switch (ap->Type) {
570 		case ACPI_MADT_TYPE_IO_APIC:
571 			mia = (ACPI_MADT_IO_APIC *) ap;
572 			if (napics_found < MAX_IO_APIC) {
573 				ioapic_paddr[napics_found++] =
574 				    (ACPI_PHYSICAL_ADDRESS)
575 				    (mia->Address & PAGEMASK);
576 			}
577 			break;
578 
579 		default:
580 			break;
581 		}
582 
583 		/* advance to next entry */
584 		madt_seen += ap->Length;
585 		ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
586 	}
587 	if (dummy_ioapicadr == NULL)
588 		dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
589 }
590 
591 
592 void *
593 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
594 {
595 	int	i;
596 
597 	/*
598 	 * If the ioapic address table is populated, check whether we are
599 	 * trying to map an ioapic; if so, return a dummy page instead.
600 	 */
601 	for (i = 0; i < napics_found; i++) {
602 		if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
603 			return (dummy_ioapicadr);
604 	}
605 	/* FUTUREWORK: test PhysicalAddress for > 32 bits */
606 	return (psm_map_new((paddr_t)PhysicalAddress,
607 	    (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
608 }
609 
610 void
611 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
612 {
613 	/*
614 	 * Check if trying to unmap dummy ioapic address.
615 	 */
616 	if (LogicalAddress == dummy_ioapicadr)
617 		return;
618 
619 	psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
620 }
621 
622 /*ARGSUSED*/
623 ACPI_STATUS
624 AcpiOsGetPhysicalAddress(void *LogicalAddress,
625     ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
626 {
627 
628 	/* UNIMPLEMENTED: not invoked by ACPI CA code */
629 	return (AE_NOT_IMPLEMENTED);
630 }
631 
632 
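/* SCI handler registered by ACPI CA and its callback context */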
633 ACPI_OSD_HANDLER acpi_isr;
634 void *acpi_isr_context;
635 
636 uint_t
637 acpi_wrapper_isr(char *arg)
638 {
639 	_NOTE(ARGUNUSED(arg))
640 
641 	int	status;
642 
643 	status = (*acpi_isr)(acpi_isr_context);
644 
645 	if (status == ACPI_INTERRUPT_HANDLED) {
646 		return (DDI_INTR_CLAIMED);
647 	} else {
648 		return (DDI_INTR_UNCLAIMED);
649 	}
650 }
651 
652 static int acpi_intr_hooked = 0;
653 
654 ACPI_STATUS
655 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
656     ACPI_OSD_HANDLER ServiceRoutine,
657     void *Context)
658 {
659 	_NOTE(ARGUNUSED(InterruptNumber))
660 
661 	int retval;
662 	int sci_vect;
663 	iflag_t sci_flags;
664 
665 	acpi_isr = ServiceRoutine;
666 	acpi_isr_context = Context;
667 
668 	/*
669 	 * Get SCI (adjusted for PIC/APIC mode if necessary)
670 	 */
671 	if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
672 		return (AE_ERROR);
673 	}
674 
675 #ifdef	DEBUG
676 	cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
677 #endif
678 
679 	retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
680 	    "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
681 	if (retval) {
682 		acpi_intr_hooked = 1;
683 		return (AE_OK);
684 	} else
685 		return (AE_BAD_PARAMETER);
686 }
687 
688 ACPI_STATUS
689 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
690     ACPI_OSD_HANDLER ServiceRoutine)
691 {
692 	_NOTE(ARGUNUSED(ServiceRoutine))
693 
694 #ifdef	DEBUG
695 	cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
696 #endif
697 	if (acpi_intr_hooked) {
698 		rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
699 		    InterruptNumber);
700 		acpi_intr_hooked = 0;
701 	}
702 	return (AE_OK);
703 }
704 
705 
706 ACPI_THREAD_ID
707 AcpiOsGetThreadId(void)
708 {
709 	/*
710 	 * ACPI CA doesn't care what actual value is returned as long
711 	 * as it is non-zero and unique to each existing thread.
712 	 * ACPI CA assumes that thread ID is castable to a pointer,
713 	 * so we use the current thread pointer.
714 	 */
715 	return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread));
716 }
717 
718 /*
719  * Dispatch Function for asynchronous execution on the taskq matching Type.
720  */
721 ACPI_STATUS
722 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK  Function,
723     void *Context)
724 {
725 
726 	if (!acpica_eventq_init) {
727 		/*
728 		 * Create taskqs for event handling
729 		 */
730 		if (init_event_queues() != AE_OK)
731 			return (AE_ERROR);
732 	}
733 
734 	if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
735 	    DDI_NOSLEEP) == DDI_FAILURE) {
736 #ifdef	DEBUG
737 		cmn_err(CE_WARN, "!acpica: unable to dispatch event");
738 #endif
739 		return (AE_ERROR);
740 	}
741 	return (AE_OK);
742 
743 }
744 
745 
746 void
747 AcpiOsWaitEventsComplete(void)
748 {
749 	int	i;
750 
751 	/*
752 	 * Wait for event queues to be empty.
753 	 */
754 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
755 		if (osl_eventq[i] != NULL) {
756 			ddi_taskq_wait(osl_eventq[i]);
757 		}
758 	}
759 }
760 
761 void
762 AcpiOsSleep(ACPI_INTEGER Milliseconds)
763 {
764 	/*
765 	 * During kernel startup, before the first tick interrupt
766 	 * has taken place, we can't call delay; very late in
767 	 * kernel shutdown or suspend/resume, clock interrupts
768 	 * are blocked, so delay doesn't work then either.
769 	 * So we busy wait if lbolt == 0 (kernel startup)
770 	 * or if acpica_use_safe_delay has been set to a
771 	 * non-zero value.
772 	 */
773 	if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
774 		drv_usecwait(Milliseconds * 1000);
775 	else
776 		delay(drv_usectohz(Milliseconds * 1000));
777 }
778 
779 void
780 AcpiOsStall(UINT32 Microseconds)
781 {
782 	drv_usecwait(Microseconds);
783 }
784 
785 
786 /*
787  * Implementation of "Windows 2001" compatible I/O permission map
788  *
789  */
790 #define	OSL_IO_NONE	(0)
791 #define	OSL_IO_READ	(1<<0)
792 #define	OSL_IO_WRITE	(1<<1)
793 #define	OSL_IO_RW	(OSL_IO_READ | OSL_IO_WRITE)
794 #define	OSL_IO_TERM	(1<<2)
795 #define	OSL_IO_DEFAULT	OSL_IO_RW
796 
797 static struct io_perm  {
798 	ACPI_IO_ADDRESS	low;
799 	ACPI_IO_ADDRESS	high;
800 	uint8_t		perm;
801 } osl_io_perm[] = {
802 	{ 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
803 };
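/* Port ranges not covered by an entry above are allowed by default. */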
804 
805 
806 /*
807  * Find the io_perm entry covering addr; NULL means no entry (access allowed).
808  */
809 static struct io_perm *
810 osl_io_find_perm(ACPI_IO_ADDRESS addr)
811 {
812 	struct io_perm *p;
813 
814 	p = osl_io_perm;
815 	while (p != NULL) {
816 		if ((p->low <= addr) && (addr <= p->high))
817 			break;
818 		p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
819 	}
820 
821 	return (p);
822 }
823 
824 /*
825  * Read Width bits from an I/O port, subject to the permission map above.
826  */
827 ACPI_STATUS
828 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
829 {
830 	struct io_perm *p;
831 
832 	/* verify permission */
833 	p = osl_io_find_perm(Address);
834 	if (p && (p->perm & OSL_IO_READ) == 0) {
835 		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
836 		    (long)Address, Width);
837 		*Value = 0xffffffff;
838 		return (AE_ERROR);
839 	}
840 
841 	switch (Width) {
842 	case 8:
843 		*Value = inb(Address);
844 		break;
845 	case 16:
846 		*Value = inw(Address);
847 		break;
848 	case 32:
849 		*Value = inl(Address);
850 		break;
851 	default:
852 		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
853 		    (long)Address, Width);
854 		return (AE_BAD_PARAMETER);
855 	}
856 	return (AE_OK);
857 }
858 
859 ACPI_STATUS
860 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
861 {
862 	struct io_perm *p;
863 
864 	/* verify permission */
865 	p = osl_io_find_perm(Address);
866 	if (p && (p->perm & OSL_IO_WRITE) == 0) {
867 		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
868 		    (long)Address, Width);
869 		return (AE_ERROR);
870 	}
871 
872 	switch (Width) {
873 	case 8:
874 		outb(Address, Value);
875 		break;
876 	case 16:
877 		outw(Address, Value);
878 		break;
879 	case 32:
880 		outl(Address, Value);
881 		break;
882 	default:
883 		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
884 		    (long)Address, Width);
885 		return (AE_BAD_PARAMETER);
886 	}
887 	return (AE_OK);
888 }
889 
890 
891 /*
892  * Memory access helper shared by AcpiOsReadMemory()/AcpiOsWriteMemory().
893  */
894 
895 #define	OSL_RW(ptr, val, type, rw) \
896 	{ if (rw) *((type *)(ptr)) = *((type *) val); \
897 	    else *((type *) val) = *((type *)(ptr)); }
898 
899 
900 static void
901 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value,
902     UINT32 Width, int write)
903 {
904 	size_t	maplen = Width / 8;
905 	caddr_t	ptr;
906 
907 	ptr = psm_map_new((paddr_t)Address, maplen,
908 	    PSM_PROT_WRITE | PSM_PROT_READ);
909 
910 	switch (maplen) {
911 	case 1:
912 		OSL_RW(ptr, Value, uint8_t, write);
913 		break;
914 	case 2:
915 		OSL_RW(ptr, Value, uint16_t, write);
916 		break;
917 	case 4:
918 		OSL_RW(ptr, Value, uint32_t, write);
919 		break;
920 	case 8:
921 		OSL_RW(ptr, Value, uint64_t, write);
922 		break;
923 	default:
924 		cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
925 		    Width);
926 		break;
927 	}
928 
929 	psm_unmap(ptr, maplen);
930 }
931 
932 ACPI_STATUS
933 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
934     UINT64 *Value, UINT32 Width)
935 {
936 	osl_rw_memory(Address, Value, Width, 0);
937 	return (AE_OK);
938 }
939 
940 ACPI_STATUS
941 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
942     UINT64 Value, UINT32 Width)
943 {
944 	osl_rw_memory(Address, &Value, Width, 1);
945 	return (AE_OK);
946 }
947 
948 
949 ACPI_STATUS
950 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
951     UINT64 *Value, UINT32 Width)
952 {
953 
954 	switch (Width) {
955 	case 8:
956 		*Value = (UINT64)(*pci_getb_func)
957 		    (PciId->Bus, PciId->Device, PciId->Function, Reg);
958 		break;
959 	case 16:
960 		*Value = (UINT64)(*pci_getw_func)
961 		    (PciId->Bus, PciId->Device, PciId->Function, Reg);
962 		break;
963 	case 32:
964 		*Value = (UINT64)(*pci_getl_func)
965 		    (PciId->Bus, PciId->Device, PciId->Function, Reg);
966 		break;
967 	case 64:
968 	default:
969 		cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
970 		    Reg, Width);
971 		return (AE_BAD_PARAMETER);
972 	}
973 	return (AE_OK);
974 }
975 
976 /*
977  * When zero, AcpiOsWritePciConfiguration() logs and discards all writes.
978  */
979 int acpica_write_pci_config_ok = 1;
980 
981 ACPI_STATUS
982 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
983     UINT64 Value, UINT32 Width)
984 {
985 
986 	if (!acpica_write_pci_config_ok) {
987 		cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
988 		    " %lx %d not permitted", PciId->Bus, PciId->Device,
989 		    PciId->Function, Reg, (long)Value, Width);
990 		return (AE_OK);
991 	}
992 
993 	switch (Width) {
994 	case 8:
995 		(*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
996 		    Reg, (uint8_t)Value);
997 		break;
998 	case 16:
999 		(*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
1000 		    Reg, (uint16_t)Value);
1001 		break;
1002 	case 32:
1003 		(*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
1004 		    Reg, (uint32_t)Value);
1005 		break;
1006 	case 64:
1007 	default:
1008 		cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
1009 		    Reg, Width);
1010 		return (AE_BAD_PARAMETER);
1011 	}
1012 	return (AE_OK);
1013 }
1014 
1015 /*
1016  * Called with ACPI_HANDLEs for both a PCI Config Space
1017  * OpRegion and (what ACPI CA thinks is) the PCI device
1018  * to which this ConfigSpace OpRegion belongs.
1019  *
1020  * ACPI CA uses _BBN and _ADR objects to determine the default
1021  * values for bus, segment, device and function; anything ACPI CA
1022  * can't figure out from the ACPI tables will be 0.  One very
1023  * old 32-bit x86 system is known to have broken _BBN; this is
1024  * not addressed here.
1025  *
1026  * Some BIOSes implement _BBN() by reading PCI config space
1027  * on bus #0 - which means that we'll recurse when we attempt
1028  * to create the devinfo-to-ACPI map.  If Derive is called during
1029  * scan_d2a_map, we don't translate the bus # and return.
1030  *
1031  * We get the parent of the OpRegion, which must be a PCI
1032  * node, fetch the associated devinfo node and snag the
1033  * b/d/f from it.
1034  */
1035 void
1036 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1037     ACPI_PCI_ID **PciId)
1038 {
1039 	ACPI_HANDLE handle;
1040 	dev_info_t *dip;
1041 	int bus, device, func, devfn;
1042 
1043 	/*
1044 	 * See above - avoid recursing during scanning_d2a_map.
1045 	 */
1046 	if (scanning_d2a_map)
1047 		return;
1048 
1049 	/*
1050 	 * Get the OpRegion's parent
1051 	 */
1052 	if (AcpiGetParent(chandle, &handle) != AE_OK)
1053 		return;
1054 
1055 	/*
1056 	 * If we've mapped the ACPI node to the devinfo
1057 	 * tree, use the devinfo reg property
1058 	 */
1059 	if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1060 	    (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1061 		(*PciId)->Bus = bus;
1062 		(*PciId)->Device = device;
1063 		(*PciId)->Function = func;
1064 	}
1065 }
1066 
1067 
1068 /*ARGSUSED*/
1069 BOOLEAN
1070 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1071 {
1072 
1073 	/* Always says yes; all mapped memory assumed readable */
1074 	return (1);
1075 }
1076 
1077 /*ARGSUSED*/
1078 BOOLEAN
1079 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1080 {
1081 
1082 	/* Always says yes; all mapped memory assumed writable */
1083 	return (1);
1084 }
1085 
1086 UINT64
1087 AcpiOsGetTimer(void)
1088 {
1089 	/* gethrtime() returns 1nS resolution; convert to 100nS granules */
1090 	return ((gethrtime() + 50) / 100);
1091 }
1092 
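/*
 * _OSI feature strings accepted by AcpiOsValidateInterface() and the
 * core features, if any, they depend on.
 */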
1093 static struct AcpiOSIFeature_s {
1094 	uint64_t	control_flag;
1095 	const char	*feature_name;
1096 } AcpiOSIFeatures[] = {
1097 	{ ACPI_FEATURE_OSI_MODULE,	"Module Device" },
1098 	{ 0,				"Processor Device" }
1099 };
1100 
1101 /*ARGSUSED*/
1102 ACPI_STATUS
1103 AcpiOsValidateInterface(char *feature)
1104 {
1105 	int i;
1106 
1107 	ASSERT(feature != NULL);
1108 	for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1109 	    i++) {
1110 		if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1111 			continue;
1112 		}
1113 		/* Check whether required core features are available. */
1114 		if (AcpiOSIFeatures[i].control_flag != 0 &&
1115 		    acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1116 		    AcpiOSIFeatures[i].control_flag) {
1117 			break;
1118 		}
1119 		/* Feature supported. */
1120 		return (AE_OK);
1121 	}
1122 
1123 	return (AE_SUPPORT);
1124 }
1125 
1126 /*ARGSUSED*/
1127 ACPI_STATUS
1128 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1129     ACPI_SIZE length)
1130 {
1131 	return (AE_OK);
1132 }
1133 
1134 ACPI_STATUS
1135 AcpiOsSignal(UINT32 Function, void *Info)
1136 {
1137 	_NOTE(ARGUNUSED(Function, Info))
1138 
1139 	/* FUTUREWORK: debugger support */
1140 
1141 	cmn_err(CE_NOTE, "!OsSignal unimplemented");
1142 	return (AE_OK);
1143 }
1144 
1145 void ACPI_INTERNAL_VAR_XFACE
1146 AcpiOsPrintf(const char *Format, ...)
1147 {
1148 	va_list ap;
1149 
1150 	va_start(ap, Format);
1151 	AcpiOsVprintf(Format, ap);
1152 	va_end(ap);
1153 }
1154 
1155 /*ARGSUSED*/
1156 ACPI_STATUS
1157 AcpiOsEnterSleep(UINT8 SleepState, UINT32 Rega, UINT32 Regb)
1158 {
1159 	return (AE_OK);
1160 }
1161 
1162 /*
1163  * When != 0, sends output to console
1164  * Patchable with kmdb or /etc/system.
1165  */
1166 int acpica_console_out = 0;
1167 
1168 #define	ACPICA_OUTBUF_LEN	160
1169 char	acpica_outbuf[ACPICA_OUTBUF_LEN];
1170 int	acpica_outbuf_offset;
1171 
1172 /*
1173  * Accumulate output and flush a line at a time to the selected destination.
1174  */
1175 static void
1176 acpica_pr_buf(char *buf)
1177 {
1178 	char c, *bufp, *outp;
1179 	int	out_remaining;
1180 
1181 	/*
1182 	 * Copy the supplied buffer into the output buffer.  When we hit
1183 	 * a '\n' or would overflow the output buffer, emit and reset the
1184 	 * output buffer.
1185 	 */
1186 	bufp = buf;
1187 	outp = acpica_outbuf + acpica_outbuf_offset;
1188 	out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1189 	while (c = *bufp++) {
1190 		*outp++ = c;
1191 		if (c == '\n' || --out_remaining == 0) {
1192 			*outp = '\0';
1193 			switch (acpica_console_out) {
1194 			case 1:
1195 				printf(acpica_outbuf);
1196 				break;
1197 			case 2:
1198 				prom_printf(acpica_outbuf);
1199 				break;
1200 			case 0:
1201 			default:
1202 				(void) strlog(0, 0, 0,
1203 				    SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1204 				    acpica_outbuf);
1205 				break;
1206 			}
1207 			acpica_outbuf_offset = 0;
1208 			outp = acpica_outbuf;
1209 			out_remaining = ACPICA_OUTBUF_LEN - 1;
1210 		}
1211 	}
1212 
1213 	acpica_outbuf_offset = outp - acpica_outbuf;
1214 }
1215 
1216 void
1217 AcpiOsVprintf(const char *Format, va_list Args)
1218 {
1219 
1220 	/*
1221 	 * If AcpiOsInitialize() failed to allocate a string buffer,
1222 	 * resort to vprintf().
1223 	 */
1224 	if (acpi_osl_pr_buffer == NULL) {
1225 		vprintf(Format, Args);
1226 		return;
1227 	}
1228 
1229 	/*
1230 	 * It is possible that a very long debug output statement will
1231 	 * be truncated; this is silently ignored.
1232 	 */
1233 	(void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1234 	acpica_pr_buf(acpi_osl_pr_buffer);
1235 }
1236 
1237 void
1238 AcpiOsRedirectOutput(void *Destination)
1239 {
1240 	_NOTE(ARGUNUSED(Destination))
1241 
1242 	/* FUTUREWORK: debugger support */
1243 
1244 #ifdef	DEBUG
1245 	cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1246 #endif
1247 }
1248 
1249 
1250 UINT32
1251 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead)
1252 {
1253 	_NOTE(ARGUNUSED(Buffer))
1254 	_NOTE(ARGUNUSED(len))
1255 	_NOTE(ARGUNUSED(BytesRead))
1256 
1257 	/* FUTUREWORK: debugger support */
1258 
1259 	return (0);
1260 }
1261 
1262 /*
1263  * Device tree binding
1264  */
1265 static ACPI_STATUS
1266 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1267 {
1268 	_NOTE(ARGUNUSED(lvl));
1269 
1270 	int sta, hid, bbn;
1271 	int busno = (intptr_t)ctxp;
1272 	ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1273 
1274 	/* Check whether device exists. */
1275 	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1276 	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1277 		/*
1278 		 * Skip object if device doesn't exist.
1279 		 * According to ACPI Spec,
1280 		 * 1) setting either bit 0 or bit 3 means that device exists.
1281 		 * 2) Absence of _STA method means all status bits set.
1282 		 */
1283 		return (AE_CTRL_DEPTH);
1284 	}
1285 
1286 	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1287 	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1288 		/* Non PCI/PCIe host bridge. */
1289 		return (AE_OK);
1290 	}
1291 
1292 	if (acpi_has_broken_bbn) {
1293 		ACPI_BUFFER rb;
1294 		rb.Pointer = NULL;
1295 		rb.Length = ACPI_ALLOCATE_BUFFER;
1296 
1297 		/* Decree _BBN == n from PCI<n> */
1298 		if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1299 			return (AE_CTRL_TERMINATE);
1300 		}
1301 		bbn = ((char *)rb.Pointer)[3] - '0';
1302 		AcpiOsFree(rb.Pointer);
1303 		if (bbn == busno || busno == 0) {
1304 			*hdlp = hdl;
1305 			return (AE_CTRL_TERMINATE);
1306 		}
1307 	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1308 		if (bbn == busno) {
1309 			*hdlp = hdl;
1310 			return (AE_CTRL_TERMINATE);
1311 		}
1312 	} else if (busno == 0) {
1313 		*hdlp = hdl;
1314 		return (AE_CTRL_TERMINATE);
1315 	}
1316 
1317 	return (AE_CTRL_DEPTH);
1318 }
1319 
1320 static int
1321 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1322 {
1323 	ACPI_HANDLE sbobj, busobj;
1324 
1325 	/* initialize static flag by querying ACPI namespace for bug */
1326 	if (acpi_has_broken_bbn == -1)
1327 		acpi_has_broken_bbn = acpica_query_bbn_problem();
1328 
1329 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1330 		busobj = NULL;
1331 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1332 		    acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1333 		    (void **)&busobj);
1334 		if (busobj != NULL) {
1335 			*rh = busobj;
1336 			return (AE_OK);
1337 		}
1338 	}
1339 
1340 	return (AE_ERROR);
1341 }
1342 
1343 static ACPI_STATUS
1344 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1345 {
1346 	_NOTE(ARGUNUSED(lvl));
1347 	_NOTE(ARGUNUSED(rvpp));
1348 
1349 	int sta, hid, bbn;
1350 	int *cntp = (int *)ctxp;
1351 
1352 	/* Check whether device exists. */
1353 	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1354 	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1355 		/*
1356 		 * Skip object if device doesn't exist.
1357 		 * According to ACPI Spec,
1358 		 * 1) setting either bit 0 or bit 3 means that device exists.
1359 		 * 2) Absence of _STA method means all status bits set.
1360 		 */
1361 		return (AE_CTRL_DEPTH);
1362 	}
1363 
1364 	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1365 	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1366 		/* Non PCI/PCIe host bridge. */
1367 		return (AE_OK);
1368 	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1369 	    bbn == 0 && ++(*cntp) > 1) {
1370 		/*
1371 		 * If we find more than one bus with a 0 _BBN
1372 		 * we have the problem that BigBear's BIOS shows
1373 		 */
1374 		return (AE_CTRL_TERMINATE);
1375 	} else {
1376 		/*
1377 		 * Skip children of PCI/PCIe host bridge.
1378 		 */
1379 		return (AE_CTRL_DEPTH);
1380 	}
1381 }
1382 
1383 /*
1384  * Look for the ACPI problem where _BBN is zero for multiple PCI buses.
1385  * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1386  * above if it exists.
1387  */
1388 static int
1389 acpica_query_bbn_problem(void)
1390 {
1391 	ACPI_HANDLE sbobj;
1392 	int zerobbncnt;
1393 	void *rv;
1394 
1395 	zerobbncnt = 0;
1396 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1397 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1398 		    acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1399 	}
1400 
1401 	return (zerobbncnt > 1 ? 1 : 0);
1402 }
1403 
1404 static const char hextab[] = "0123456789ABCDEF";
1405 
1406 static int
1407 hexdig(int c)
1408 {
1409 	/*
1410 	 *  Get hex digit:
1411 	 *
1412 	 *  Returns the 4-bit hex digit named by the input character.  Returns
1413 	 *  zero if the input character is not valid hex!
1414 	 */
1415 
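	/* fold lower-case a-z to upper case before searching hextab */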
1416 	int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1417 	int j = sizeof (hextab);
1418 
1419 	while (--j && (x != hextab[j])) {
1420 	}
1421 	return (j);
1422 }
1423 
1424 static int
1425 CompressEisaID(char *np)
1426 {
1427 	/*
1428 	 *  Compress an EISA device name:
1429 	 *
1430 	 *  This routine converts a 7-byte ASCII device name into the 4-byte
1431 	 *  compressed form used by EISA (50 bytes of ROM to save 1 byte of
1432 	 *  NV-RAM!)
1433 	 */
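	/*
	 * For example, "PNP0A03" compresses to the bytes 0x41 0xd0 0x0a 0x03,
	 * which reads as the integer 0x030ad041 on little-endian x86.
	 */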
1434 
1435 	union { char octets[4]; int retval; } myu;
1436 
1437 	myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1438 	myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1439 	myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1440 	myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1441 
1442 	return (myu.retval);
1443 }
1444 
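/*
 * Evaluate an ACPI method (e.g. _BBN or _ADR) that returns an integer and
 * store the result in *rint.
 */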
1445 ACPI_STATUS
1446 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1447 {
1448 	ACPI_STATUS status;
1449 	ACPI_BUFFER rb;
1450 	ACPI_OBJECT ro;
1451 
1452 	rb.Pointer = &ro;
1453 	rb.Length = sizeof (ro);
1454 	if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1455 	    ACPI_TYPE_INTEGER)) == AE_OK)
1456 		*rint = ro.Integer.Value;
1457 
1458 	return (status);
1459 }
1460 
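/*
 * Evaluate a _HID-style method; accepts either an integer or a 7-character
 * EISA ID string, which is compressed to its integer form.
 */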
1461 static int
1462 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1463 {
1464 	ACPI_BUFFER rb;
1465 	ACPI_OBJECT *rv;
1466 
1467 	rb.Pointer = NULL;
1468 	rb.Length = ACPI_ALLOCATE_BUFFER;
1469 	if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1470 	    rb.Length != 0) {
1471 		rv = rb.Pointer;
1472 		if (rv->Type == ACPI_TYPE_INTEGER) {
1473 			*rint = rv->Integer.Value;
1474 			AcpiOsFree(rv);
1475 			return (AE_OK);
1476 		} else if (rv->Type == ACPI_TYPE_STRING) {
1477 			char *stringData;
1478 
1479 			/* Convert the string into an EISA ID */
1480 			if (rv->String.Pointer == NULL) {
1481 				AcpiOsFree(rv);
1482 				return (AE_ERROR);
1483 			}
1484 
1485 			stringData = rv->String.Pointer;
1486 
1487 			/*
1488 			 * If the string is an EisaID, it must be 7
1489 			 * characters; if it's an ACPI ID, it will be 8
1490 			 * (and we don't care about ACPI ids here).
1491 			 */
1492 			if (strlen(stringData) != 7) {
1493 				AcpiOsFree(rv);
1494 				return (AE_ERROR);
1495 			}
1496 
1497 			*rint = CompressEisaID(stringData);
1498 			AcpiOsFree(rv);
1499 			return (AE_OK);
1500 		} else
1501 			AcpiOsFree(rv);
1502 	}
1503 	return (AE_ERROR);
1504 }
1505 
1506 /*
1507  * Create linkage between devinfo nodes and ACPI nodes
1508  */
1509 ACPI_STATUS
1510 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1511 {
1512 	ACPI_STATUS status;
1513 	ACPI_BUFFER rb;
1514 
1515 	/*
1516 	 * Tag the devinfo node with the ACPI name
1517 	 */
1518 	rb.Pointer = NULL;
1519 	rb.Length = ACPI_ALLOCATE_BUFFER;
1520 	status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1521 	if (ACPI_FAILURE(status)) {
1522 		cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1523 	} else {
1524 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1525 		    "acpi-namespace", (char *)rb.Pointer);
1526 		AcpiOsFree(rb.Pointer);
1527 
1528 		/*
1529 		 * Tag the ACPI node with the dip
1530 		 */
1531 		status = acpica_set_devinfo(acpiobj, dip);
1532 		ASSERT(ACPI_SUCCESS(status));
1533 	}
1534 
1535 	return (status);
1536 }
1537 
1538 /*
1539  * Destroy linkage between devinfo nodes and ACPI nodes
1540  */
1541 ACPI_STATUS
1542 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1543 {
1544 	(void) acpica_unset_devinfo(acpiobj);
1545 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1546 
1547 	return (AE_OK);
1548 }
1549 
1550 /*
1551  * Return the ACPI device node matching the CPU dev_info node.
1552  */
1553 ACPI_STATUS
1554 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1555 {
1556 	int i;
1557 
1558 	/*
1559 	 * if cpu_map itself is NULL, we're a uppc system and
1560 	 * acpica_build_processor_map() hasn't been called yet.
1561 	 * So call it here
1562 	 */
1563 	if (cpu_map == NULL) {
1564 		(void) acpica_build_processor_map();
1565 		if (cpu_map == NULL)
1566 			return (AE_ERROR);
1567 	}
1568 
1569 	if (cpu_id < 0) {
1570 		return (AE_ERROR);
1571 	}
1572 
1573 	/*
1574 	 * search object with cpuid in cpu_map
1575 	 */
1576 	mutex_enter(&cpu_map_lock);
1577 	for (i = 0; i < cpu_map_count; i++) {
1578 		if (cpu_map[i]->cpu_id == cpu_id) {
1579 			break;
1580 		}
1581 	}
1582 	if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1583 		*rh = cpu_map[i]->obj;
1584 		mutex_exit(&cpu_map_lock);
1585 		return (AE_OK);
1586 	}
1587 
1588 	/* Handle special case for uppc-only systems. */
1589 	if (cpu_map_called == 0) {
1590 		uint32_t apicid = cpuid_get_apicid(CPU);
1591 		if (apicid != UINT32_MAX) {
1592 			for (i = 0; i < cpu_map_count; i++) {
1593 				if (cpu_map[i]->apic_id == apicid) {
1594 					break;
1595 				}
1596 			}
1597 			if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1598 				*rh = cpu_map[i]->obj;
1599 				mutex_exit(&cpu_map_lock);
1600 				return (AE_OK);
1601 			}
1602 		}
1603 	}
1604 	mutex_exit(&cpu_map_lock);
1605 
1606 	return (AE_ERROR);
1607 }
1608 
1609 /*
1610  * Determine if this object is a processor
1611  */
1612 static ACPI_STATUS
1613 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1614 {
1615 	ACPI_STATUS status;
1616 	ACPI_OBJECT_TYPE objtype;
1617 	unsigned long acpi_id;
1618 	ACPI_BUFFER rb;
1619 	ACPI_DEVICE_INFO *di;
1620 
1621 	if (AcpiGetType(obj, &objtype) != AE_OK)
1622 		return (AE_OK);
1623 
1624 	if (objtype == ACPI_TYPE_PROCESSOR) {
1625 		/* process a Processor */
1626 		rb.Pointer = NULL;
1627 		rb.Length = ACPI_ALLOCATE_BUFFER;
1628 		status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1629 		    ACPI_TYPE_PROCESSOR);
1630 		if (status != AE_OK) {
1631 			cmn_err(CE_WARN, "!acpica: error probing Processor");
1632 			return (status);
1633 		}
1634 		acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1635 		AcpiOsFree(rb.Pointer);
1636 	} else if (objtype == ACPI_TYPE_DEVICE) {
1637 		/* process a processor Device */
1638 		status = AcpiGetObjectInfo(obj, &di);
1639 		if (status != AE_OK) {
1640 			cmn_err(CE_WARN,
1641 			    "!acpica: error probing Processor Device\n");
1642 			return (status);
1643 		}
1644 
1645 		if (!(di->Valid & ACPI_VALID_UID) ||
1646 		    ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1647 			ACPI_FREE(di);
1648 			cmn_err(CE_WARN,
1649 			    "!acpica: error probing Processor Device _UID\n");
1650 			return (AE_ERROR);
1651 		}
1652 		ACPI_FREE(di);
1653 	}
1654 	(void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1655 
1656 	return (AE_OK);
1657 }
1658 
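/*
 * Walk the immediate children of the devinfo root looking for PCI/PCIe
 * buses and bind each bus (and, via scan_d2a_subtree(), its descendants)
 * to the corresponding ACPI namespace node.
 */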
1659 void
1660 scan_d2a_map(void)
1661 {
1662 	dev_info_t *dip, *cdip;
1663 	ACPI_HANDLE acpiobj;
1664 	char *device_type_prop;
1665 	int bus;
1666 	static int map_error = 0;
1667 
1668 	if (map_error || (d2a_done != 0))
1669 		return;
1670 
1671 	scanning_d2a_map = 1;
1672 
1673 	/*
1674 	 * Find all child-of-root PCI buses, and find their corresponding
1675 	 * ACPI child-of-root PCI nodes.  For each one, add to the
1676 	 * d2a table.
1677 	 */
1678 
1679 	for (dip = ddi_get_child(ddi_root_node());
1680 	    dip != NULL;
1681 	    dip = ddi_get_next_sibling(dip)) {
1682 
1683 		/* prune non-PCI nodes */
1684 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1685 		    DDI_PROP_DONTPASS,
1686 		    "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1687 			continue;
1688 
1689 		if ((strcmp("pci", device_type_prop) != 0) &&
1690 		    (strcmp("pciex", device_type_prop) != 0)) {
1691 			ddi_prop_free(device_type_prop);
1692 			continue;
1693 		}
1694 
1695 		ddi_prop_free(device_type_prop);
1696 
1697 		/*
1698 		 * To get bus number of dip, get first child and get its
1699 		 * bus number.  If NULL, just continue, because we don't
1700 		 * care about bus nodes with no children anyway.
1701 		 */
1702 		if ((cdip = ddi_get_child(dip)) == NULL)
1703 			continue;
1704 
1705 		if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1706 #ifdef D2ADEBUG
1707 			cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1708 #endif
1709 			map_error = 1;
1710 			scanning_d2a_map = 0;
1711 			d2a_done = 1;
1712 			return;
1713 		}
1714 
1715 		if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1716 #ifdef D2ADEBUG
1717 			cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1718 #endif
1719 			map_error = 1;
1720 			continue;
1721 		}
1722 
1723 		acpica_tag_devinfo(dip, acpiobj);
1724 
1725 		/* call recursively to enumerate subtrees */
1726 		scan_d2a_subtree(dip, acpiobj, bus);
1727 	}
1728 
1729 	scanning_d2a_map = 0;
1730 	d2a_done = 1;
1731 }
1732 
1733 /*
1734  * For all acpi child devices of acpiobj, find their matching
1735  * dip under "dip" argument.  (matching means "matches dev/fn").
1736  * bus is assumed to already be a match from caller, and is
1737  * used here only to record in the d2a entry.  Recurse if necessary.
1738  */
1739 static void
1740 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1741 {
1742 	int acpi_devfn, hid;
1743 	ACPI_HANDLE acld;
1744 	dev_info_t *dcld;
1745 	int dcld_b, dcld_d, dcld_f;
1746 	int dev, func;
1747 	char *device_type_prop;
1748 
1749 	acld = NULL;
1750 	while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1751 	    == AE_OK) {
1752 		/* get the dev/func we're looking for in the devinfo tree */
1753 		if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1754 			continue;
1755 		dev = (acpi_devfn >> 16) & 0xFFFF;
1756 		func = acpi_devfn & 0xFFFF;
1757 
1758 		/* look through all the immediate children of dip */
1759 		for (dcld = ddi_get_child(dip); dcld != NULL;
1760 		    dcld = ddi_get_next_sibling(dcld)) {
1761 			if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1762 				continue;
1763 
1764 			/* dev must match; function must match or wildcard */
1765 			if (dcld_d != dev ||
1766 			    (func != 0xFFFF && func != dcld_f))
1767 				continue;
1768 			bus = dcld_b;
1769 
1770 			/* found a match, record it */
1771 			acpica_tag_devinfo(dcld, acld);
1772 
1773 			/* if we find a bridge, recurse from here */
1774 			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1775 			    DDI_PROP_DONTPASS, "device_type",
1776 			    &device_type_prop) == DDI_PROP_SUCCESS) {
1777 				if ((strcmp("pci", device_type_prop) == 0) ||
1778 				    (strcmp("pciex", device_type_prop) == 0))
1779 					scan_d2a_subtree(dcld, acld, bus);
1780 				ddi_prop_free(device_type_prop);
1781 			}
1782 
1783 			/* done finding a match, so break now */
1784 			break;
1785 		}
1786 	}
1787 }
1788 
1789 /*
1790  * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1791  */
1792 int
1793 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1794 {
1795 	pci_regspec_t *pci_rp;
1796 	int len;
1797 
1798 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1799 	    "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1800 		return (-1);
1801 
1802 	if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1803 		ddi_prop_free(pci_rp);
1804 		return (-1);
1805 	}
1806 	if (bus != NULL)
1807 		*bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1808 	if (device != NULL)
1809 		*device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1810 	if (func != NULL)
1811 		*func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1812 	ddi_prop_free(pci_rp);
1813 	return (0);
1814 }
1815 
1816 /*
1817  * Return the ACPI device node matching this dev_info node, if it
1818  * exists in the ACPI tree.
1819  */
1820 ACPI_STATUS
1821 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1822 {
1823 	ACPI_STATUS status;
1824 	char *acpiname;
1825 
1826 #ifdef	DEBUG
1827 	if (d2a_done == 0)
1828 		cmn_err(CE_WARN, "!acpica_get_handle:"
1829 		    " no ACPI mapping for %s", ddi_node_name(dip));
1830 #endif
1831 
1832 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1833 	    "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1834 		return (AE_ERROR);
1835 	}
1836 
1837 	status = AcpiGetHandle(NULL, acpiname, rh);
1838 	ddi_prop_free((void *)acpiname);
1839 	return (status);
1840 }
1841 
1842 
1843 
1844 /*
1845  * Manage OS data attachment to ACPI nodes
1846  */
1847 
1848 /*
1849  * Return the (dev_info_t *) associated with the ACPI node.
1850  */
1851 ACPI_STATUS
1852 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1853 {
1854 	ACPI_STATUS status;
1855 	void *ptr;
1856 
1857 	status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1858 	if (status == AE_OK)
1859 		*dipp = (dev_info_t *)ptr;
1860 
1861 	return (status);
1862 }
1863 
1864 /*
1865  * Set the dev_info_t associated with the ACPI node.
1866  */
1867 static ACPI_STATUS
1868 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1869 {
1870 	ACPI_STATUS status;
1871 
1872 	status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1873 	return (status);
1874 }
1875 
1876 /*
1877  * Unset the dev_info_t associated with the ACPI node.
1878  */
1879 static ACPI_STATUS
1880 acpica_unset_devinfo(ACPI_HANDLE obj)
1881 {
1882 	return (AcpiDetachData(obj, acpica_devinfo_handler));
1883 }
1884 
1885 /*
1886  * Object-data handler required by AcpiAttachData(); nothing to do here.
1887  */
1888 void
1889 acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
1890 {
1891 	/* no-op */
1892 }
1893 
1894 ACPI_STATUS
1895 acpica_build_processor_map(void)
1896 {
1897 	ACPI_STATUS status;
1898 	void *rv;
1899 
1900 	/*
1901 	 * shouldn't be called more than once anyway
1902 	 */
1903 	if (cpu_map_built)
1904 		return (AE_OK);
1905 
1906 	/*
1907 	 * ACPI device configuration driver has built mapping information
1908 	 * among processor id and object handle, no need to probe again.
1909 	 */
1910 	if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1911 		cpu_map_built = 1;
1912 		return (AE_OK);
1913 	}
1914 
1915 	/*
1916 	 * Look for Processor objects
1917 	 */
1918 	status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1919 	    ACPI_ROOT_OBJECT,
1920 	    4,
1921 	    acpica_probe_processor,
1922 	    NULL,
1923 	    NULL,
1924 	    &rv);
1925 	ASSERT(status == AE_OK);
1926 
1927 	/*
1928 	 * Look for processor Device objects
1929 	 */
1930 	status = AcpiGetDevices("ACPI0007",
1931 	    acpica_probe_processor,
1932 	    NULL,
1933 	    &rv);
1934 	ASSERT(status == AE_OK);
1935 	cpu_map_built = 1;
1936 
1937 	return (status);
1938 }
1939 
1940 /*
1941  * Grow cpu map table on demand.
1942  */
1943 static void
1944 acpica_grow_cpu_map(void)
1945 {
1946 	if (cpu_map_count == cpu_map_count_max) {
1947 		size_t sz;
1948 		struct cpu_map_item **new_map;
1949 
1950 		ASSERT(cpu_map_count_max < INT_MAX / 2);
1951 		cpu_map_count_max += max_ncpus;
1952 		new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1953 		    KM_SLEEP);
1954 		if (cpu_map_count != 0) {
1955 			ASSERT(cpu_map != NULL);
1956 			sz = sizeof (cpu_map[0]) * cpu_map_count;
1957 			kcopy(cpu_map, new_map, sz);
1958 			kmem_free(cpu_map, sz);
1959 		}
1960 		cpu_map = new_map;
1961 	}
1962 }
1963 
1964 /*
1965  * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1966  * ACPI handle). The mapping table will be set up in two steps:
1967  * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1968  *    processor id and ACPI object handle.
1969  * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1970  * On systems which have ACPI device configuration for CPUs enabled,
1971  * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
1972  * otherwise acpica_map_cpu() will be called before
1973  * acpica_add_processor_to_map().
1974  */
1975 ACPI_STATUS
1976 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
1977 {
1978 	int i;
1979 	ACPI_STATUS rc = AE_OK;
1980 	struct cpu_map_item *item = NULL;
1981 
1982 	ASSERT(obj != NULL);
1983 	if (obj == NULL) {
1984 		return (AE_ERROR);
1985 	}
1986 
1987 	mutex_enter(&cpu_map_lock);
1988 
1989 	/*
1990 	 * Special case for uppc
1991 	 * If we're a uppc system and ACPI device configuration for CPU has
1992 	 * been disabled, there won't be a CPU map yet because uppc psm doesn't
1993 	 * call acpica_map_cpu(). So create one and use the passed-in processor
1994 	 * as CPU 0
1995 	 * Assumption: the first CPU returned by
1996 	 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
1997 	 * Unfortunately there appears to be no good way to ASSERT this.
1998 	 */
1999 	if (cpu_map == NULL &&
2000 	    !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
2001 		acpica_grow_cpu_map();
2002 		ASSERT(cpu_map != NULL);
2003 		item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2004 		item->cpu_id = 0;
2005 		item->proc_id = acpi_id;
2006 		item->apic_id = apic_id;
2007 		item->obj = obj;
2008 		cpu_map[0] = item;
2009 		cpu_map_count = 1;
2010 		mutex_exit(&cpu_map_lock);
2011 		return (AE_OK);
2012 	}
2013 
2014 	for (i = 0; i < cpu_map_count; i++) {
2015 		if (cpu_map[i]->obj == obj) {
2016 			rc = AE_ALREADY_EXISTS;
2017 			break;
2018 		} else if (cpu_map[i]->proc_id == acpi_id) {
2019 			ASSERT(item == NULL);
2020 			item = cpu_map[i];
2021 		}
2022 	}
2023 
2024 	if (rc == AE_OK) {
2025 		if (item != NULL) {
2026 			/*
2027 			 * ACPI alias objects may cause more than one object
2028 			 * with the same ACPI processor id; only remember the
2029 			 * first object encountered.
2030 			 */
2031 			if (item->obj == NULL) {
2032 				item->obj = obj;
2033 				item->apic_id = apic_id;
2034 			} else {
2035 				rc = AE_ALREADY_EXISTS;
2036 			}
2037 		} else if (cpu_map_count >= INT_MAX / 2) {
2038 			rc = AE_NO_MEMORY;
2039 		} else {
2040 			acpica_grow_cpu_map();
2041 			ASSERT(cpu_map != NULL);
2042 			ASSERT(cpu_map_count < cpu_map_count_max);
2043 			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2044 			item->cpu_id = -1;
2045 			item->proc_id = acpi_id;
2046 			item->apic_id = apic_id;
2047 			item->obj = obj;
2048 			cpu_map[cpu_map_count] = item;
2049 			cpu_map_count++;
2050 		}
2051 	}
2052 
2053 	mutex_exit(&cpu_map_lock);
2054 
2055 	return (rc);
2056 }
2057 
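/*
 * Detach the ACPI object from the map entry for acpi_id; the entry is
 * freed once neither a cpu id nor an object references it.
 */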
2058 ACPI_STATUS
2059 acpica_remove_processor_from_map(UINT32 acpi_id)
2060 {
2061 	int i;
2062 	ACPI_STATUS rc = AE_NOT_EXIST;
2063 
2064 	mutex_enter(&cpu_map_lock);
2065 	for (i = 0; i < cpu_map_count; i++) {
2066 		if (cpu_map[i]->proc_id != acpi_id) {
2067 			continue;
2068 		}
2069 		cpu_map[i]->obj = NULL;
2070 		/* Free item if no more reference to it. */
2071 		if (cpu_map[i]->cpu_id == -1) {
2072 			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2073 			cpu_map[i] = NULL;
2074 			cpu_map_count--;
2075 			if (i != cpu_map_count) {
2076 				cpu_map[i] = cpu_map[cpu_map_count];
2077 				cpu_map[cpu_map_count] = NULL;
2078 			}
2079 		}
2080 		rc = AE_OK;
2081 		break;
2082 	}
2083 	mutex_exit(&cpu_map_lock);
2084 
2085 	return (rc);
2086 }
2087 
2088 ACPI_STATUS
2089 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2090 {
2091 	int i;
2092 	ACPI_STATUS rc = AE_OK;
2093 	struct cpu_map_item *item = NULL;
2094 
2095 	ASSERT(cpuid != -1);
2096 	if (cpuid == -1) {
2097 		return (AE_ERROR);
2098 	}
2099 
2100 	mutex_enter(&cpu_map_lock);
2101 	cpu_map_called = 1;
2102 	for (i = 0; i < cpu_map_count; i++) {
2103 		if (cpu_map[i]->cpu_id == cpuid) {
2104 			rc = AE_ALREADY_EXISTS;
2105 			break;
2106 		} else if (cpu_map[i]->proc_id == acpi_id) {
2107 			ASSERT(item == NULL);
2108 			item = cpu_map[i];
2109 		}
2110 	}
2111 	if (rc == AE_OK) {
2112 		if (item != NULL) {
2113 			if (item->cpu_id == -1) {
2114 				item->cpu_id = cpuid;
2115 			} else {
2116 				rc = AE_ALREADY_EXISTS;
2117 			}
2118 		} else if (cpu_map_count >= INT_MAX / 2) {
2119 			rc = AE_NO_MEMORY;
2120 		} else {
2121 			acpica_grow_cpu_map();
2122 			ASSERT(cpu_map != NULL);
2123 			ASSERT(cpu_map_count < cpu_map_count_max);
2124 			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2125 			item->cpu_id = cpuid;
2126 			item->proc_id = acpi_id;
2127 			item->apic_id = UINT32_MAX;
2128 			item->obj = NULL;
2129 			cpu_map[cpu_map_count] = item;
2130 			cpu_map_count++;
2131 		}
2132 	}
2133 	mutex_exit(&cpu_map_lock);
2134 
2135 	return (rc);
2136 }
2137 
2138 ACPI_STATUS
2139 acpica_unmap_cpu(processorid_t cpuid)
2140 {
2141 	int i;
2142 	ACPI_STATUS rc = AE_NOT_EXIST;
2143 
2144 	ASSERT(cpuid != -1);
2145 	if (cpuid == -1) {
2146 		return (rc);
2147 	}
2148 
2149 	mutex_enter(&cpu_map_lock);
2150 	for (i = 0; i < cpu_map_count; i++) {
2151 		if (cpu_map[i]->cpu_id != cpuid) {
2152 			continue;
2153 		}
2154 		cpu_map[i]->cpu_id = -1;
2155 		/* Free item if no more reference. */
2156 		if (cpu_map[i]->obj == NULL) {
2157 			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2158 			cpu_map[i] = NULL;
2159 			cpu_map_count--;
2160 			if (i != cpu_map_count) {
2161 				cpu_map[i] = cpu_map[cpu_map_count];
2162 				cpu_map[cpu_map_count] = NULL;
2163 			}
2164 		}
2165 		rc = AE_OK;
2166 		break;
2167 	}
2168 	mutex_exit(&cpu_map_lock);
2169 
2170 	return (rc);
2171 }
2172 
2173 ACPI_STATUS
2174 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2175 {
2176 	int i;
2177 	ACPI_STATUS rc = AE_NOT_EXIST;
2178 
2179 	ASSERT(cpuid != -1);
2180 	if (cpuid == -1) {
2181 		return (rc);
2182 	}
2183 
2184 	mutex_enter(&cpu_map_lock);
2185 	for (i = 0; i < cpu_map_count; i++) {
2186 		if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2187 			*hdlp = cpu_map[i]->obj;
2188 			rc = AE_OK;
2189 			break;
2190 		}
2191 	}
2192 	mutex_exit(&cpu_map_lock);
2193 
2194 	return (rc);
2195 }
2196 
2197 ACPI_STATUS
2198 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2199 {
2200 	int i;
2201 	ACPI_STATUS rc = AE_NOT_EXIST;
2202 
2203 	mutex_enter(&cpu_map_lock);
2204 	for (i = 0; i < cpu_map_count; i++) {
2205 		if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2206 			*hdlp = cpu_map[i]->obj;
2207 			rc = AE_OK;
2208 			break;
2209 		}
2210 	}
2211 	mutex_exit(&cpu_map_lock);
2212 
2213 	return (rc);
2214 }
2215 
2216 ACPI_STATUS
2217 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2218 {
2219 	int i;
2220 	ACPI_STATUS rc = AE_NOT_EXIST;
2221 
2222 	ASSERT(apicid != UINT32_MAX);
2223 	if (apicid == UINT32_MAX) {
2224 		return (rc);
2225 	}
2226 
2227 	mutex_enter(&cpu_map_lock);
2228 	for (i = 0; i < cpu_map_count; i++) {
2229 		if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2230 			*hdlp = cpu_map[i]->obj;
2231 			rc = AE_OK;
2232 			break;
2233 		}
2234 	}
2235 	mutex_exit(&cpu_map_lock);
2236 
2237 	return (rc);
2238 }
2239 
2240 ACPI_STATUS
2241 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2242 {
2243 	int i;
2244 	ACPI_STATUS rc = AE_NOT_EXIST;
2245 
2246 	ASSERT(cpuidp != NULL);
2247 	if (hdl == NULL || cpuidp == NULL) {
2248 		return (rc);
2249 	}
2250 
2251 	*cpuidp = -1;
2252 	mutex_enter(&cpu_map_lock);
2253 	for (i = 0; i < cpu_map_count; i++) {
2254 		if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2255 			*cpuidp = cpu_map[i]->cpu_id;
2256 			rc = AE_OK;
2257 			break;
2258 		}
2259 	}
2260 	mutex_exit(&cpu_map_lock);
2261 
2262 	return (rc);
2263 }
2264 
2265 ACPI_STATUS
2266 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2267 {
2268 	int i;
2269 	ACPI_STATUS rc = AE_NOT_EXIST;
2270 
2271 	ASSERT(rp != NULL);
2272 	if (hdl == NULL || rp == NULL) {
2273 		return (rc);
2274 	}
2275 
2276 	*rp = UINT32_MAX;
2277 	mutex_enter(&cpu_map_lock);
2278 	for (i = 0; i < cpu_map_count; i++) {
2279 		if (cpu_map[i]->obj == hdl &&
2280 		    cpu_map[i]->apic_id != UINT32_MAX) {
2281 			*rp = cpu_map[i]->apic_id;
2282 			rc = AE_OK;
2283 			break;
2284 		}
2285 	}
2286 	mutex_exit(&cpu_map_lock);
2287 
2288 	return (rc);
2289 }
2290 
2291 ACPI_STATUS
2292 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2293 {
2294 	int i;
2295 	ACPI_STATUS rc = AE_NOT_EXIST;
2296 
2297 	ASSERT(rp != NULL);
2298 	if (hdl == NULL || rp == NULL) {
2299 		return (rc);
2300 	}
2301 
2302 	*rp = UINT32_MAX;
2303 	mutex_enter(&cpu_map_lock);
2304 	for (i = 0; i < cpu_map_count; i++) {
2305 		if (cpu_map[i]->obj == hdl) {
2306 			*rp = cpu_map[i]->proc_id;
2307 			rc = AE_OK;
2308 			break;
2309 		}
2310 	}
2311 	mutex_exit(&cpu_map_lock);
2312 
2313 	return (rc);
2314 }
2315 
2316 void
2317 acpica_set_core_feature(uint64_t features)
2318 {
2319 	atomic_or_64(&acpica_core_features, features);
2320 }
2321 
2322 void
2323 acpica_clear_core_feature(uint64_t features)
2324 {
2325 	atomic_and_64(&acpica_core_features, ~features);
2326 }
2327 
2328 uint64_t
2329 acpica_get_core_feature(uint64_t features)
2330 {
2331 	return (acpica_core_features & features);
2332 }
2333 
2334 void
2335 acpica_set_devcfg_feature(uint64_t features)
2336 {
2337 	atomic_or_64(&acpica_devcfg_features, features);
2338 }
2339 
2340 void
2341 acpica_clear_devcfg_feature(uint64_t features)
2342 {
2343 	atomic_and_64(&acpica_devcfg_features, ~features);
2344 }
2345 
2346 uint64_t
2347 acpica_get_devcfg_feature(uint64_t features)
2348 {
2349 	return (acpica_devcfg_features & features);
2350 }
2351 
2352 void
2353 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2354 {
2355 	*gbl_FADT = &AcpiGbl_FADT;
2356 }
2357 
2358 void
2359 acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
2360 {
2361 	if (pstates && AcpiGbl_FADT.PstateControl != 0)
2362 		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2363 		    AcpiGbl_FADT.PstateControl);
2364 
2365 	if (cstates && AcpiGbl_FADT.CstControl != 0)
2366 		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2367 		    AcpiGbl_FADT.CstControl);
2368 }
2369 
2370 uint32_t
2371 acpi_strtoul(const char *str, char **ep, int base)
2372 {
2373 	ulong_t v;
2374 
2375 	if (ddi_strtoul(str, ep, base, &v) != 0 || v > ACPI_UINT32_MAX) {
2376 		return (ACPI_UINT32_MAX);
2377 	}
2378 
2379 	return ((uint32_t)v);
2380 }
2381 
2382 /*
2383  * In prior versions of ACPI, the AcpiGetObjectInfo() function would provide
2384  * information about the status of the object via the _STA method.  That
2385  * has been removed; this function is used in its place.
2386  *
2387  * Not every ACPI object has a _STA method. In cases where it is not found,
2388  * the OSPM (aka us) is supposed to interpret that as though it indicates that
2389  * the device is present, enabled, shown in the UI, and functioning. This is the
2390  * value 0xF.
2391  */
2392 ACPI_STATUS
2393 acpica_get_object_status(ACPI_HANDLE obj, int *statusp)
2394 {
2395 	ACPI_STATUS status;
2396 	int ival;
2397 
2398 	status = acpica_eval_int(obj, METHOD_NAME__STA, &ival);
2399 	if (ACPI_FAILURE(status)) {
2400 		if (status == AE_NOT_FOUND) {
2401 			*statusp = 0xf;
2402 			return (AE_OK);
2403 		}
2404 
2405 		return (status);
2406 	}
2407 
2408 	/*
2409 	 * This should not be a negative value. However, firmware is often the
2410 	 * enemy. If it is, complain and treat that as a hard failure.
2411 	 */
2412 	if (ival < 0) {
2413 		cmn_err(CE_WARN, "!acpica_get_object_status: encountered "
2414 		    "negative _STA value on obj %p", obj);
2415 		return (AE_ERROR);
2416 	}
2417 
2418 	*statusp = ival;
2419 	return (AE_OK);
2420 }
2421