xref: /illumos-gate/usr/src/uts/intel/io/acpica/osl.c (revision aa2aa9a6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright (c) 2009, Intel Corporation.
28  * All rights reserved.
29  */
30 /*
31  * ACPI CA OSL for Solaris x86
32  */
33 
34 #include <sys/types.h>
35 #include <sys/kmem.h>
36 #include <sys/psm.h>
37 #include <sys/pci_cfgspace.h>
38 #include <sys/apic.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/sunndi.h>
42 #include <sys/pci.h>
43 #include <sys/kobj.h>
44 #include <sys/taskq.h>
45 #include <sys/strlog.h>
46 #include <sys/note.h>
47 
48 #include <sys/acpi/acpi.h>
49 #include <sys/acpica.h>
50 
51 #define	MAX_DAT_FILE_SIZE	(64*1024)
52 
53 /* local functions */
54 static int CompressEisaID(char *np);
55 
56 static void scan_d2a_map(void);
57 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
58 
59 static int acpica_query_bbn_problem(void);
60 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
61 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
62 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
63 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
64 static void acpica_devinfo_handler(ACPI_HANDLE, UINT32, void *);
65 
66 /*
67  * Event queue vars
68  */
69 int acpica_eventq_init = 0;
70 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
71 
72 /*
73  * Priorities relative to minclsyspri that each taskq
74  * run at; OSL_NOTIFY_HANDLER needs to run at a higher
75  * priority than OSL_GPE_HANDLER.  There's an implicit
76  * assumption that no priority here results in exceeding
77  * maxclsyspri.
78  * Note: these initializations need to match the order of
79  * ACPI_EXECUTE_TYPE.
80  */
81 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
82 	0,	/* OSL_GLOBAL_LOCK_HANDLER */
83 	2,	/* OSL_NOTIFY_HANDLER */
84 	0,	/* OSL_GPE_HANDLER */
85 	0,	/* OSL_DEBUGGER_THREAD */
86 	0,	/* OSL_EC_POLL_HANDLER */
87 	0	/* OSL_EC_BURST_HANDLER */
88 };
89 
90 /*
91  * Note, if you change this path, you need to update
92  * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
93  */
94 static char *acpi_table_path = "/boot/acpi/tables/";
95 
96 /* non-zero while scan_d2a_map() is working */
97 static int scanning_d2a_map = 0;
98 static int d2a_done = 0;
99 
100 /* features supported by ACPICA and ACPI device configuration. */
101 uint64_t acpica_core_features = 0;
102 static uint64_t acpica_devcfg_features = 0;
103 
104 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
105 int acpica_use_safe_delay = 0;
106 
107 /* CPU mapping data */
108 struct cpu_map_item {
109 	processorid_t	cpu_id;
110 	UINT32		proc_id;
111 	UINT32		apic_id;
112 	ACPI_HANDLE	obj;
113 };
114 
115 static kmutex_t cpu_map_lock;
116 static struct cpu_map_item **cpu_map = NULL;
117 static int cpu_map_count_max = 0;
118 static int cpu_map_count = 0;
119 static int cpu_map_built = 0;
120 
121 static int acpi_has_broken_bbn = -1;
122 
123 /* buffer for AcpiOsVprintf() */
124 #define	ACPI_OSL_PR_BUFLEN	1024
125 static char *acpi_osl_pr_buffer = NULL;
126 static int acpi_osl_pr_buflen;
127 
128 #define	D2A_DEBUG
129 
130 /*
131  *
132  */
133 static void
134 discard_event_queues()
135 {
136 	int	i;
137 
138 	/*
139 	 * destroy event queues
140 	 */
141 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
142 		if (osl_eventq[i])
143 			ddi_taskq_destroy(osl_eventq[i]);
144 	}
145 }
146 
147 
148 /*
149  *
150  */
151 static ACPI_STATUS
152 init_event_queues()
153 {
154 	char	namebuf[32];
155 	int	i, error = 0;
156 
157 	/*
158 	 * Initialize event queues
159 	 */
160 
161 	/* Always allocate only 1 thread per queue to force FIFO execution */
162 	for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
163 		snprintf(namebuf, 32, "ACPI%d", i);
164 		osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
165 		    osl_eventq_pri_delta[i] + minclsyspri, 0);
166 		if (osl_eventq[i] == NULL)
167 			error++;
168 	}
169 
170 	if (error != 0) {
171 		discard_event_queues();
172 #ifdef	DEBUG
173 		cmn_err(CE_WARN, "!acpica: could not initialize event queues");
174 #endif
175 		return (AE_ERROR);
176 	}
177 
178 	acpica_eventq_init = 1;
179 	return (AE_OK);
180 }
181 
182 /*
183  * One-time initialization of OSL layer
184  */
185 ACPI_STATUS
186 AcpiOsInitialize(void)
187 {
188 	/*
189 	 * Allocate buffer for AcpiOsVprintf() here to avoid
190 	 * kmem_alloc()/kmem_free() at high PIL
191 	 */
192 	acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
193 	if (acpi_osl_pr_buffer != NULL)
194 		acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
195 
196 	return (AE_OK);
197 }
198 
199 /*
200  * One-time shut-down of OSL layer
201  */
202 ACPI_STATUS
203 AcpiOsTerminate(void)
204 {
205 
206 	if (acpi_osl_pr_buffer != NULL)
207 		kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
208 
209 	discard_event_queues();
210 	return (AE_OK);
211 }
212 
213 
214 ACPI_PHYSICAL_ADDRESS
215 AcpiOsGetRootPointer()
216 {
217 	ACPI_PHYSICAL_ADDRESS Address;
218 
219 	/*
220 	 * For EFI firmware, the root pointer is defined in EFI systab.
221 	 * The boot code process the table and put the physical address
222 	 * in the acpi-root-tab property.
223 	 */
224 	Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
225 	    DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
226 
227 	if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
228 		Address = NULL;
229 
230 	return (Address);
231 }
232 
233 /*ARGSUSED*/
234 ACPI_STATUS
235 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
236 				ACPI_STRING *NewVal)
237 {
238 
239 	*NewVal = 0;
240 	return (AE_OK);
241 }
242 
243 static void
244 acpica_strncpy(char *dest, const char *src, int len)
245 {
246 
247 	/*LINTED*/
248 	while ((*dest++ = *src++) && (--len > 0))
249 		/* copy the string */;
250 	*dest = '\0';
251 }
252 
253 ACPI_STATUS
254 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
255 			ACPI_TABLE_HEADER **NewTable)
256 {
257 	char signature[5];
258 	char oemid[7];
259 	char oemtableid[9];
260 	struct _buf *file;
261 	char *buf1, *buf2;
262 	int count;
263 	char acpi_table_loc[128];
264 
265 	acpica_strncpy(signature, ExistingTable->Signature, 4);
266 	acpica_strncpy(oemid, ExistingTable->OemId, 6);
267 	acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
268 
269 #ifdef	DEBUG
270 	cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
271 	    " OEM TABLE ID [%s] OEM rev %x",
272 	    signature, ExistingTable->Revision, oemid, oemtableid,
273 	    ExistingTable->OemRevision);
274 #endif
275 
276 	/* File name format is "signature_oemid_oemtableid.dat" */
277 	(void) strcpy(acpi_table_loc, acpi_table_path);
278 	(void) strcat(acpi_table_loc, signature); /* for example, DSDT */
279 	(void) strcat(acpi_table_loc, "_");
280 	(void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
281 	(void) strcat(acpi_table_loc, "_");
282 	(void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
283 	(void) strcat(acpi_table_loc, ".dat");
284 
285 	file = kobj_open_file(acpi_table_loc);
286 	if (file == (struct _buf *)-1) {
287 		*NewTable = 0;
288 		return (AE_OK);
289 	} else {
290 		buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
291 		count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
292 		if (count >= MAX_DAT_FILE_SIZE) {
293 			cmn_err(CE_WARN, "!acpica: table %s file size too big",
294 			    acpi_table_loc);
295 			*NewTable = 0;
296 		} else {
297 			buf2 = (char *)kmem_alloc(count, KM_SLEEP);
298 			(void) memcpy(buf2, buf1, count);
299 			*NewTable = (ACPI_TABLE_HEADER *)buf2;
300 			cmn_err(CE_NOTE, "!acpica: replacing table: %s",
301 			    acpi_table_loc);
302 		}
303 	}
304 	kobj_close_file(file);
305 	kmem_free(buf1, MAX_DAT_FILE_SIZE);
306 
307 	return (AE_OK);
308 }
309 
310 
311 /*
312  * ACPI semaphore implementation
313  */
314 typedef struct {
315 	kmutex_t	mutex;
316 	kcondvar_t	cv;
317 	uint32_t	available;
318 	uint32_t	initial;
319 	uint32_t	maximum;
320 } acpi_sema_t;
321 
322 /*
323  *
324  */
325 void
326 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
327 {
328 	mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
329 	cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
330 	/* no need to enter mutex here at creation */
331 	sp->available = count;
332 	sp->initial = count;
333 	sp->maximum = max;
334 }
335 
336 /*
337  *
338  */
339 void
340 acpi_sema_destroy(acpi_sema_t *sp)
341 {
342 
343 	cv_destroy(&sp->cv);
344 	mutex_destroy(&sp->mutex);
345 }
346 
347 /*
348  *
349  */
350 ACPI_STATUS
351 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
352 {
353 	ACPI_STATUS rv = AE_OK;
354 	clock_t deadline;
355 
356 	mutex_enter(&sp->mutex);
357 
358 	if (sp->available >= count) {
359 		/*
360 		 * Enough units available, no blocking
361 		 */
362 		sp->available -= count;
363 		mutex_exit(&sp->mutex);
364 		return (rv);
365 	} else if (wait_time == 0) {
366 		/*
367 		 * Not enough units available and timeout
368 		 * specifies no blocking
369 		 */
370 		rv = AE_TIME;
371 		mutex_exit(&sp->mutex);
372 		return (rv);
373 	}
374 
375 	/*
376 	 * Not enough units available and timeout specifies waiting
377 	 */
378 	if (wait_time != ACPI_WAIT_FOREVER)
379 		deadline = ddi_get_lbolt() +
380 		    (clock_t)drv_usectohz(wait_time * 1000);
381 
382 	do {
383 		if (wait_time == ACPI_WAIT_FOREVER)
384 			cv_wait(&sp->cv, &sp->mutex);
385 		else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
386 			rv = AE_TIME;
387 			break;
388 		}
389 	} while (sp->available < count);
390 
391 	/* if we dropped out of the wait with AE_OK, we got the units */
392 	if (rv == AE_OK)
393 		sp->available -= count;
394 
395 	mutex_exit(&sp->mutex);
396 	return (rv);
397 }
398 
399 /*
400  *
401  */
402 void
403 acpi_sema_v(acpi_sema_t *sp, unsigned count)
404 {
405 	mutex_enter(&sp->mutex);
406 	sp->available += count;
407 	cv_broadcast(&sp->cv);
408 	mutex_exit(&sp->mutex);
409 }
410 
411 
412 ACPI_STATUS
413 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
414 ACPI_HANDLE *OutHandle)
415 {
416 	acpi_sema_t *sp;
417 
418 	if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
419 		return (AE_BAD_PARAMETER);
420 
421 	sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
422 	acpi_sema_init(sp, MaxUnits, InitialUnits);
423 	*OutHandle = (ACPI_HANDLE)sp;
424 	return (AE_OK);
425 }
426 
427 
428 ACPI_STATUS
429 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
430 {
431 
432 	if (Handle == NULL)
433 		return (AE_BAD_PARAMETER);
434 
435 	acpi_sema_destroy((acpi_sema_t *)Handle);
436 	kmem_free((void *)Handle, sizeof (acpi_sema_t));
437 	return (AE_OK);
438 }
439 
440 ACPI_STATUS
441 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
442 {
443 
444 	if ((Handle == NULL) || (Units < 1))
445 		return (AE_BAD_PARAMETER);
446 
447 	return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
448 }
449 
450 ACPI_STATUS
451 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
452 {
453 
454 	if ((Handle == NULL) || (Units < 1))
455 		return (AE_BAD_PARAMETER);
456 
457 	acpi_sema_v((acpi_sema_t *)Handle, Units);
458 	return (AE_OK);
459 }
460 
461 ACPI_STATUS
462 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
463 {
464 	kmutex_t *mp;
465 
466 	if (OutHandle == NULL)
467 		return (AE_BAD_PARAMETER);
468 
469 	mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
470 	mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
471 	*OutHandle = (ACPI_HANDLE)mp;
472 	return (AE_OK);
473 }
474 
475 void
476 AcpiOsDeleteLock(ACPI_HANDLE Handle)
477 {
478 
479 	if (Handle == NULL)
480 		return;
481 
482 	mutex_destroy((kmutex_t *)Handle);
483 	kmem_free((void *)Handle, sizeof (kmutex_t));
484 }
485 
486 ACPI_CPU_FLAGS
487 AcpiOsAcquireLock(ACPI_HANDLE Handle)
488 {
489 
490 
491 	if (Handle == NULL)
492 		return (AE_BAD_PARAMETER);
493 
494 	if (curthread == CPU->cpu_idle_thread) {
495 		while (!mutex_tryenter((kmutex_t *)Handle))
496 			/* spin */;
497 	} else
498 		mutex_enter((kmutex_t *)Handle);
499 	return (AE_OK);
500 }
501 
502 void
503 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
504 {
505 	_NOTE(ARGUNUSED(Flags))
506 
507 	mutex_exit((kmutex_t *)Handle);
508 }
509 
510 
511 void *
512 AcpiOsAllocate(ACPI_SIZE Size)
513 {
514 	ACPI_SIZE *tmp_ptr;
515 
516 	Size += sizeof (Size);
517 	tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
518 	*tmp_ptr++ = Size;
519 	return (tmp_ptr);
520 }
521 
522 void
523 AcpiOsFree(void *Memory)
524 {
525 	ACPI_SIZE	size, *tmp_ptr;
526 
527 	tmp_ptr = (ACPI_SIZE *)Memory;
528 	tmp_ptr -= 1;
529 	size = *tmp_ptr;
530 	kmem_free(tmp_ptr, size);
531 }
532 
533 static int napics_found;	/* number of ioapic addresses in array */
534 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
535 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
536 static void *dummy_ioapicadr;
537 
538 void
539 acpica_find_ioapics(void)
540 {
541 	int			madt_seen, madt_size;
542 	ACPI_SUBTABLE_HEADER		*ap;
543 	ACPI_MADT_IO_APIC		*mia;
544 
545 	if (acpi_mapic_dtp != NULL)
546 		return;	/* already parsed table */
547 	if (AcpiGetTable(ACPI_SIG_MADT, 1,
548 	    (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
549 		return;
550 
551 	napics_found = 0;
552 
553 	/*
554 	 * Search the MADT for ioapics
555 	 */
556 	ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
557 	madt_size = acpi_mapic_dtp->Header.Length;
558 	madt_seen = sizeof (*acpi_mapic_dtp);
559 
560 	while (madt_seen < madt_size) {
561 
562 		switch (ap->Type) {
563 		case ACPI_MADT_TYPE_IO_APIC:
564 			mia = (ACPI_MADT_IO_APIC *) ap;
565 			if (napics_found < MAX_IO_APIC) {
566 				ioapic_paddr[napics_found++] =
567 				    (ACPI_PHYSICAL_ADDRESS)
568 				    (mia->Address & PAGEMASK);
569 			}
570 			break;
571 
572 		default:
573 			break;
574 		}
575 
576 		/* advance to next entry */
577 		madt_seen += ap->Length;
578 		ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
579 	}
580 	if (dummy_ioapicadr == NULL)
581 		dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
582 }
583 
584 
585 void *
586 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
587 {
588 	int	i;
589 
590 	/*
591 	 * If the iopaic address table is populated, check if trying
592 	 * to access an ioapic.  Instead, return a pointer to a dummy ioapic.
593 	 */
594 	for (i = 0; i < napics_found; i++) {
595 		if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
596 			return (dummy_ioapicadr);
597 	}
598 	/* FUTUREWORK: test PhysicalAddress for > 32 bits */
599 	return (psm_map_new((paddr_t)PhysicalAddress,
600 	    (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
601 }
602 
603 void
604 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
605 {
606 	/*
607 	 * Check if trying to unmap dummy ioapic address.
608 	 */
609 	if (LogicalAddress == dummy_ioapicadr)
610 		return;
611 
612 	psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
613 }
614 
615 /*ARGSUSED*/
616 ACPI_STATUS
617 AcpiOsGetPhysicalAddress(void *LogicalAddress,
618 			ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
619 {
620 
621 	/* UNIMPLEMENTED: not invoked by ACPI CA code */
622 	return (AE_NOT_IMPLEMENTED);
623 }
624 
625 
626 ACPI_OSD_HANDLER acpi_isr;
627 void *acpi_isr_context;
628 
629 uint_t
630 acpi_wrapper_isr(char *arg)
631 {
632 	_NOTE(ARGUNUSED(arg))
633 
634 	int	status;
635 
636 	status = (*acpi_isr)(acpi_isr_context);
637 
638 	if (status == ACPI_INTERRUPT_HANDLED) {
639 		return (DDI_INTR_CLAIMED);
640 	} else {
641 		return (DDI_INTR_UNCLAIMED);
642 	}
643 }
644 
645 static int acpi_intr_hooked = 0;
646 
647 ACPI_STATUS
648 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
649 		ACPI_OSD_HANDLER ServiceRoutine,
650 		void *Context)
651 {
652 	_NOTE(ARGUNUSED(InterruptNumber))
653 
654 	int retval;
655 	int sci_vect;
656 	iflag_t sci_flags;
657 
658 	acpi_isr = ServiceRoutine;
659 	acpi_isr_context = Context;
660 
661 	/*
662 	 * Get SCI (adjusted for PIC/APIC mode if necessary)
663 	 */
664 	if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
665 		return (AE_ERROR);
666 	}
667 
668 #ifdef	DEBUG
669 	cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
670 #endif
671 
672 	retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
673 	    "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
674 	if (retval) {
675 		acpi_intr_hooked = 1;
676 		return (AE_OK);
677 	} else
678 		return (AE_BAD_PARAMETER);
679 }
680 
681 ACPI_STATUS
682 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
683 			ACPI_OSD_HANDLER ServiceRoutine)
684 {
685 	_NOTE(ARGUNUSED(ServiceRoutine))
686 
687 #ifdef	DEBUG
688 	cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
689 #endif
690 	if (acpi_intr_hooked) {
691 		rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
692 		    InterruptNumber);
693 		acpi_intr_hooked = 0;
694 	}
695 	return (AE_OK);
696 }
697 
698 
699 ACPI_THREAD_ID
700 AcpiOsGetThreadId(void)
701 {
702 	/*
703 	 * ACPI CA doesn't care what actual value is returned as long
704 	 * as it is non-zero and unique to each existing thread.
705 	 * ACPI CA assumes that thread ID is castable to a pointer,
706 	 * so we use the current thread pointer.
707 	 */
708 	return (curthread);
709 }
710 
711 /*
712  *
713  */
714 ACPI_STATUS
715 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK  Function,
716     void *Context)
717 {
718 
719 	if (!acpica_eventq_init) {
720 		/*
721 		 * Create taskqs for event handling
722 		 */
723 		if (init_event_queues() != AE_OK)
724 			return (AE_ERROR);
725 	}
726 
727 	if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
728 	    DDI_NOSLEEP) == DDI_FAILURE) {
729 #ifdef	DEBUG
730 		cmn_err(CE_WARN, "!acpica: unable to dispatch event");
731 #endif
732 		return (AE_ERROR);
733 	}
734 	return (AE_OK);
735 
736 }
737 
738 void
739 AcpiOsSleep(ACPI_INTEGER Milliseconds)
740 {
741 	/*
742 	 * During kernel startup, before the first tick interrupt
743 	 * has taken place, we can't call delay; very late in
744 	 * kernel shutdown or suspend/resume, clock interrupts
745 	 * are blocked, so delay doesn't work then either.
746 	 * So we busy wait if lbolt == 0 (kernel startup)
747 	 * or if acpica_use_safe_delay has been set to a
748 	 * non-zero value.
749 	 */
750 	if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
751 		drv_usecwait(Milliseconds * 1000);
752 	else
753 		delay(drv_usectohz(Milliseconds * 1000));
754 }
755 
756 void
757 AcpiOsStall(UINT32 Microseconds)
758 {
759 	drv_usecwait(Microseconds);
760 }
761 
762 
763 /*
764  * Implementation of "Windows 2001" compatible I/O permission map
765  *
766  */
767 #define	OSL_IO_NONE	(0)
768 #define	OSL_IO_READ	(1<<0)
769 #define	OSL_IO_WRITE	(1<<1)
770 #define	OSL_IO_RW	(OSL_IO_READ | OSL_IO_WRITE)
771 #define	OSL_IO_TERM	(1<<2)
772 #define	OSL_IO_DEFAULT	OSL_IO_RW
773 
774 static struct io_perm  {
775 	ACPI_IO_ADDRESS	low;
776 	ACPI_IO_ADDRESS	high;
777 	uint8_t		perm;
778 } osl_io_perm[] = {
779 	{ 0xcf8, 0xd00, OSL_IO_NONE | OSL_IO_TERM }
780 };
781 
782 
783 /*
784  *
785  */
786 static struct io_perm *
787 osl_io_find_perm(ACPI_IO_ADDRESS addr)
788 {
789 	struct io_perm *p;
790 
791 	p = osl_io_perm;
792 	while (p != NULL) {
793 		if ((p->low <= addr) && (addr <= p->high))
794 			break;
795 		p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
796 	}
797 
798 	return (p);
799 }
800 
801 /*
802  *
803  */
804 ACPI_STATUS
805 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
806 {
807 	struct io_perm *p;
808 
809 	/* verify permission */
810 	p = osl_io_find_perm(Address);
811 	if (p && (p->perm & OSL_IO_READ) == 0) {
812 		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
813 		    (long)Address, Width);
814 		*Value = 0xffffffff;
815 		return (AE_ERROR);
816 	}
817 
818 	switch (Width) {
819 	case 8:
820 		*Value = inb(Address);
821 		break;
822 	case 16:
823 		*Value = inw(Address);
824 		break;
825 	case 32:
826 		*Value = inl(Address);
827 		break;
828 	default:
829 		cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
830 		    (long)Address, Width);
831 		return (AE_BAD_PARAMETER);
832 	}
833 	return (AE_OK);
834 }
835 
836 ACPI_STATUS
837 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
838 {
839 	struct io_perm *p;
840 
841 	/* verify permission */
842 	p = osl_io_find_perm(Address);
843 	if (p && (p->perm & OSL_IO_WRITE) == 0) {
844 		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
845 		    (long)Address, Width);
846 		return (AE_ERROR);
847 	}
848 
849 	switch (Width) {
850 	case 8:
851 		outb(Address, Value);
852 		break;
853 	case 16:
854 		outw(Address, Value);
855 		break;
856 	case 32:
857 		outl(Address, Value);
858 		break;
859 	default:
860 		cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
861 		    (long)Address, Width);
862 		return (AE_BAD_PARAMETER);
863 	}
864 	return (AE_OK);
865 }
866 
867 
868 /*
869  *
870  */
871 
872 #define	OSL_RW(ptr, val, type, rw) \
873 	{ if (rw) *((type *)(ptr)) = *((type *) val); \
874 	    else *((type *) val) = *((type *)(ptr)); }
875 
876 
877 static void
878 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value,
879     UINT32 Width, int write)
880 {
881 	size_t	maplen = Width / 8;
882 	caddr_t	ptr;
883 
884 	ptr = psm_map_new((paddr_t)Address, maplen,
885 	    PSM_PROT_WRITE | PSM_PROT_READ);
886 
887 	switch (maplen) {
888 	case 1:
889 		OSL_RW(ptr, Value, uint8_t, write);
890 		break;
891 	case 2:
892 		OSL_RW(ptr, Value, uint16_t, write);
893 		break;
894 	case 4:
895 		OSL_RW(ptr, Value, uint32_t, write);
896 		break;
897 	default:
898 		cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
899 		    Width);
900 		break;
901 	}
902 
903 	psm_unmap(ptr, maplen);
904 }
905 
906 ACPI_STATUS
907 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
908 		UINT32 *Value, UINT32 Width)
909 {
910 	osl_rw_memory(Address, Value, Width, 0);
911 	return (AE_OK);
912 }
913 
914 ACPI_STATUS
915 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
916 		UINT32 Value, UINT32 Width)
917 {
918 	osl_rw_memory(Address, &Value, Width, 1);
919 	return (AE_OK);
920 }
921 
922 
923 ACPI_STATUS
924 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
925 			void *Value, UINT32 Width)
926 {
927 
928 	switch (Width) {
929 	case 8:
930 		*((UINT64 *)Value) = (UINT64)(*pci_getb_func)
931 		    (PciId->Bus, PciId->Device, PciId->Function, Register);
932 		break;
933 	case 16:
934 		*((UINT64 *)Value) = (UINT64)(*pci_getw_func)
935 		    (PciId->Bus, PciId->Device, PciId->Function, Register);
936 		break;
937 	case 32:
938 		*((UINT64 *)Value) = (UINT64)(*pci_getl_func)
939 		    (PciId->Bus, PciId->Device, PciId->Function, Register);
940 		break;
941 	case 64:
942 	default:
943 		cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
944 		    Register, Width);
945 		return (AE_BAD_PARAMETER);
946 	}
947 	return (AE_OK);
948 }
949 
950 /*
951  *
952  */
953 int acpica_write_pci_config_ok = 1;
954 
955 ACPI_STATUS
956 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Register,
957 		ACPI_INTEGER Value, UINT32 Width)
958 {
959 
960 	if (!acpica_write_pci_config_ok) {
961 		cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
962 		    " %lx %d not permitted", PciId->Bus, PciId->Device,
963 		    PciId->Function, Register, (long)Value, Width);
964 		return (AE_OK);
965 	}
966 
967 	switch (Width) {
968 	case 8:
969 		(*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
970 		    Register, (uint8_t)Value);
971 		break;
972 	case 16:
973 		(*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
974 		    Register, (uint16_t)Value);
975 		break;
976 	case 32:
977 		(*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
978 		    Register, (uint32_t)Value);
979 		break;
980 	case 64:
981 	default:
982 		cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
983 		    Register, Width);
984 		return (AE_BAD_PARAMETER);
985 	}
986 	return (AE_OK);
987 }
988 
989 /*
990  * Called with ACPI_HANDLEs for both a PCI Config Space
991  * OpRegion and (what ACPI CA thinks is) the PCI device
992  * to which this ConfigSpace OpRegion belongs.  Since
993  * ACPI CA depends on a valid _BBN object being present
994  * and this is not always true (one old x86 had broken _BBN),
995  * we go ahead and get the correct PCI bus number using the
996  * devinfo mapping (which compensates for broken _BBN).
997  *
998  * Default values for bus, segment, device and function are
999  * all 0 when ACPI CA can't figure them out.
1000  *
1001  * Some BIOSes implement _BBN() by reading PCI config space
1002  * on bus #0 - which means that we'll recurse when we attempt
1003  * to create the devinfo-to-ACPI map.  If Derive is called during
1004  * scan_d2a_map, we don't translate the bus # and return.
1005  *
1006  * We get the parent of the OpRegion, which must be a PCI
1007  * node, fetch the associated devinfo node and snag the
1008  * b/d/f from it.
1009  */
1010 void
1011 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1012 		ACPI_PCI_ID **PciId)
1013 {
1014 	ACPI_HANDLE handle;
1015 	dev_info_t *dip;
1016 	int bus, device, func, devfn;
1017 
1018 
1019 	/*
1020 	 * See above - avoid recursing during scanning_d2a_map.
1021 	 */
1022 	if (scanning_d2a_map)
1023 		return;
1024 
1025 	/*
1026 	 * Get the OpRegion's parent
1027 	 */
1028 	if (AcpiGetParent(chandle, &handle) != AE_OK)
1029 		return;
1030 
1031 	/*
1032 	 * If we've mapped the ACPI node to the devinfo
1033 	 * tree, use the devinfo reg property
1034 	 */
1035 	if (acpica_get_devinfo(handle, &dip) == AE_OK) {
1036 		(void) acpica_get_bdf(dip, &bus, &device, &func);
1037 		(*PciId)->Bus = bus;
1038 		(*PciId)->Device = device;
1039 		(*PciId)->Function = func;
1040 	} else if (acpica_eval_int(handle, "_ADR", &devfn) == AE_OK) {
1041 		/* no devinfo node - just confirm the d/f */
1042 		(*PciId)->Device = (devfn >> 16) & 0xFFFF;
1043 		(*PciId)->Function = devfn & 0xFFFF;
1044 	}
1045 }
1046 
1047 
1048 /*ARGSUSED*/
1049 BOOLEAN
1050 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1051 {
1052 
1053 	/* Always says yes; all mapped memory assumed readable */
1054 	return (1);
1055 }
1056 
1057 /*ARGSUSED*/
1058 BOOLEAN
1059 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1060 {
1061 
1062 	/* Always says yes; all mapped memory assumed writable */
1063 	return (1);
1064 }
1065 
1066 UINT64
1067 AcpiOsGetTimer(void)
1068 {
1069 	/* gethrtime() returns 1nS resolution; convert to 100nS granules */
1070 	return ((gethrtime() + 50) / 100);
1071 }
1072 
1073 static struct AcpiOSIFeature_s {
1074 	uint64_t	control_flag;
1075 	const char	*feature_name;
1076 } AcpiOSIFeatures[] = {
1077 	{ ACPI_FEATURE_OSI_MODULE,	"Module Device" },
1078 	{ 0,				"Processor Device" }
1079 };
1080 
1081 /*ARGSUSED*/
1082 ACPI_STATUS
1083 AcpiOsValidateInterface(char *feature)
1084 {
1085 	int i;
1086 
1087 	ASSERT(feature != NULL);
1088 	for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1089 	    i++) {
1090 		if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1091 			continue;
1092 		}
1093 		/* Check whether required core features are available. */
1094 		if (AcpiOSIFeatures[i].control_flag != 0 &&
1095 		    acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1096 		    AcpiOSIFeatures[i].control_flag) {
1097 			break;
1098 		}
1099 		/* Feature supported. */
1100 		return (AE_OK);
1101 	}
1102 
1103 	return (AE_SUPPORT);
1104 }
1105 
1106 /*ARGSUSED*/
1107 ACPI_STATUS
1108 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1109     ACPI_SIZE length)
1110 {
1111 	return (AE_OK);
1112 }
1113 
1114 ACPI_STATUS
1115 AcpiOsSignal(UINT32 Function, void *Info)
1116 {
1117 	_NOTE(ARGUNUSED(Function, Info))
1118 
1119 	/* FUTUREWORK: debugger support */
1120 
1121 	cmn_err(CE_NOTE, "!OsSignal unimplemented");
1122 	return (AE_OK);
1123 }
1124 
1125 void ACPI_INTERNAL_VAR_XFACE
1126 AcpiOsPrintf(const char *Format, ...)
1127 {
1128 	va_list ap;
1129 
1130 	va_start(ap, Format);
1131 	AcpiOsVprintf(Format, ap);
1132 	va_end(ap);
1133 }
1134 
1135 /*
1136  * When != 0, sends output to console
1137  * Patchable with kmdb or /etc/system.
1138  */
1139 int acpica_console_out = 0;
1140 
1141 #define	ACPICA_OUTBUF_LEN	160
1142 char	acpica_outbuf[ACPICA_OUTBUF_LEN];
1143 int	acpica_outbuf_offset;
1144 
1145 /*
1146  *
1147  */
1148 static void
1149 acpica_pr_buf(char *buf)
1150 {
1151 	char c, *bufp, *outp;
1152 	int	out_remaining;
1153 
1154 	/*
1155 	 * copy the supplied buffer into the output buffer
1156 	 * when we hit a '\n' or overflow the output buffer,
1157 	 * output and reset the output buffer
1158 	 */
1159 	bufp = buf;
1160 	outp = acpica_outbuf + acpica_outbuf_offset;
1161 	out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1162 	while (c = *bufp++) {
1163 		*outp++ = c;
1164 		if (c == '\n' || --out_remaining == 0) {
1165 			*outp = '\0';
1166 			if (acpica_console_out)
1167 				printf(acpica_outbuf);
1168 			else
1169 				(void) strlog(0, 0, 0,
1170 				    SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1171 				    acpica_outbuf);
1172 			acpica_outbuf_offset = 0;
1173 			outp = acpica_outbuf;
1174 			out_remaining = ACPICA_OUTBUF_LEN - 1;
1175 		}
1176 	}
1177 
1178 	acpica_outbuf_offset = outp - acpica_outbuf;
1179 }
1180 
1181 void
1182 AcpiOsVprintf(const char *Format, va_list Args)
1183 {
1184 
1185 	/*
1186 	 * If AcpiOsInitialize() failed to allocate a string buffer,
1187 	 * resort to vprintf().
1188 	 */
1189 	if (acpi_osl_pr_buffer == NULL) {
1190 		vprintf(Format, Args);
1191 		return;
1192 	}
1193 
1194 	/*
1195 	 * It is possible that a very long debug output statement will
1196 	 * be truncated; this is silently ignored.
1197 	 */
1198 	(void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1199 	acpica_pr_buf(acpi_osl_pr_buffer);
1200 }
1201 
1202 void
1203 AcpiOsRedirectOutput(void *Destination)
1204 {
1205 	_NOTE(ARGUNUSED(Destination))
1206 
1207 	/* FUTUREWORK: debugger support */
1208 
1209 #ifdef	DEBUG
1210 	cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1211 #endif
1212 }
1213 
1214 
1215 UINT32
1216 AcpiOsGetLine(char *Buffer)
1217 {
1218 	_NOTE(ARGUNUSED(Buffer))
1219 
1220 	/* FUTUREWORK: debugger support */
1221 
1222 	return (0);
1223 }
1224 
1225 /*
1226  * Device tree binding
1227  */
1228 static ACPI_STATUS
1229 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1230 {
1231 	_NOTE(ARGUNUSED(lvl));
1232 
1233 	int sta, hid, bbn;
1234 	int busno = (intptr_t)ctxp;
1235 	ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1236 
1237 	/* Check whether device exists. */
1238 	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1239 	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1240 		/*
1241 		 * Skip object if device doesn't exist.
1242 		 * According to ACPI Spec,
1243 		 * 1) setting either bit 0 or bit 3 means that device exists.
1244 		 * 2) Absence of _STA method means all status bits set.
1245 		 */
1246 		return (AE_CTRL_DEPTH);
1247 	}
1248 
1249 	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1250 	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1251 		/* Non PCI/PCIe host bridge. */
1252 		return (AE_OK);
1253 	}
1254 
1255 	if (acpi_has_broken_bbn) {
1256 		ACPI_BUFFER rb;
1257 		rb.Pointer = NULL;
1258 		rb.Length = ACPI_ALLOCATE_BUFFER;
1259 
1260 		/* Decree _BBN == n from PCI<n> */
1261 		if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1262 			return (AE_CTRL_TERMINATE);
1263 		}
1264 		bbn = ((char *)rb.Pointer)[3] - '0';
1265 		AcpiOsFree(rb.Pointer);
1266 		if (bbn == busno || busno == 0) {
1267 			*hdlp = hdl;
1268 			return (AE_CTRL_TERMINATE);
1269 		}
1270 	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1271 		if (bbn == busno) {
1272 			*hdlp = hdl;
1273 			return (AE_CTRL_TERMINATE);
1274 		}
1275 	} else if (busno == 0) {
1276 		*hdlp = hdl;
1277 		return (AE_CTRL_TERMINATE);
1278 	}
1279 
1280 	return (AE_CTRL_DEPTH);
1281 }
1282 
1283 static int
1284 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1285 {
1286 	ACPI_HANDLE sbobj, busobj;
1287 
1288 	/* initialize static flag by querying ACPI namespace for bug */
1289 	if (acpi_has_broken_bbn == -1)
1290 		acpi_has_broken_bbn = acpica_query_bbn_problem();
1291 
1292 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1293 		busobj = NULL;
1294 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1295 		    acpica_find_pcibus_walker, (void *)(intptr_t)busno,
1296 		    (void **)&busobj);
1297 		if (busobj != NULL) {
1298 			*rh = busobj;
1299 			return (AE_OK);
1300 		}
1301 	}
1302 
1303 	return (AE_ERROR);
1304 }
1305 
1306 static ACPI_STATUS
1307 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1308 {
1309 	_NOTE(ARGUNUSED(lvl));
1310 	_NOTE(ARGUNUSED(rvpp));
1311 
1312 	int sta, hid, bbn;
1313 	int *cntp = (int *)ctxp;
1314 
1315 	/* Check whether device exists. */
1316 	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1317 	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1318 		/*
1319 		 * Skip object if device doesn't exist.
1320 		 * According to ACPI Spec,
1321 		 * 1) setting either bit 0 or bit 3 means that device exists.
1322 		 * 2) Absence of _STA method means all status bits set.
1323 		 */
1324 		return (AE_CTRL_DEPTH);
1325 	}
1326 
1327 	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1328 	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1329 		/* Non PCI/PCIe host bridge. */
1330 		return (AE_OK);
1331 	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1332 	    bbn == 0 && ++(*cntp) > 1) {
1333 		/*
1334 		 * If we find more than one bus with a 0 _BBN
1335 		 * we have the problem that BigBear's BIOS shows
1336 		 */
1337 		return (AE_CTRL_TERMINATE);
1338 	} else {
1339 		/*
1340 		 * Skip children of PCI/PCIe host bridge.
1341 		 */
1342 		return (AE_CTRL_DEPTH);
1343 	}
1344 }
1345 
1346 /*
1347  * Look for ACPI problem where _BBN is zero for multiple PCI buses
1348  * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1349  * below if it exists.
1350  */
1351 static int
1352 acpica_query_bbn_problem(void)
1353 {
1354 	ACPI_HANDLE sbobj;
1355 	int zerobbncnt;
1356 	void *rv;
1357 
1358 	zerobbncnt = 0;
1359 	if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1360 		(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1361 		    acpica_query_bbn_walker, &zerobbncnt, &rv);
1362 	}
1363 
1364 	return (zerobbncnt > 1 ? 1 : 0);
1365 }
1366 
1367 static const char hextab[] = "0123456789ABCDEF";
1368 
1369 static int
1370 hexdig(int c)
1371 {
1372 	/*
1373 	 *  Get hex digit:
1374 	 *
1375 	 *  Returns the 4-bit hex digit named by the input character.  Returns
1376 	 *  zero if the input character is not valid hex!
1377 	 */
1378 
1379 	int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1380 	int j = sizeof (hextab);
1381 
1382 	while (--j && (x != hextab[j])) {
1383 	}
1384 	return (j);
1385 }
1386 
1387 static int
1388 CompressEisaID(char *np)
1389 {
1390 	/*
1391 	 *  Compress an EISA device name:
1392 	 *
1393 	 *  This routine converts a 7-byte ASCII device name into the 4-byte
1394 	 *  compressed form used by EISA (50 bytes of ROM to save 1 byte of
1395 	 *  NV-RAM!)
1396 	 */
1397 
1398 	union { char octets[4]; int retval; } myu;
1399 
1400 	myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1401 	myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1402 	myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1403 	myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1404 
1405 	return (myu.retval);
1406 }
1407 
1408 ACPI_STATUS
1409 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1410 {
1411 	ACPI_STATUS status;
1412 	ACPI_BUFFER rb;
1413 	ACPI_OBJECT ro;
1414 
1415 	rb.Pointer = &ro;
1416 	rb.Length = sizeof (ro);
1417 	if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1418 	    ACPI_TYPE_INTEGER)) == AE_OK)
1419 		*rint = ro.Integer.Value;
1420 
1421 	return (status);
1422 }
1423 
1424 static int
1425 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1426 {
1427 	ACPI_BUFFER rb;
1428 	ACPI_OBJECT *rv;
1429 
1430 	rb.Pointer = NULL;
1431 	rb.Length = ACPI_ALLOCATE_BUFFER;
1432 	if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1433 	    rb.Length != 0) {
1434 		rv = rb.Pointer;
1435 		if (rv->Type == ACPI_TYPE_INTEGER) {
1436 			*rint = rv->Integer.Value;
1437 			AcpiOsFree(rv);
1438 			return (AE_OK);
1439 		} else if (rv->Type == ACPI_TYPE_STRING) {
1440 			char *stringData;
1441 
1442 			/* Convert the string into an EISA ID */
1443 			if (rv->String.Pointer == NULL) {
1444 				AcpiOsFree(rv);
1445 				return (AE_ERROR);
1446 			}
1447 
1448 			stringData = rv->String.Pointer;
1449 
1450 			/*
1451 			 * If the string is an EisaID, it must be 7
1452 			 * characters; if it's an ACPI ID, it will be 8
1453 			 * (and we don't care about ACPI ids here).
1454 			 */
1455 			if (strlen(stringData) != 7) {
1456 				AcpiOsFree(rv);
1457 				return (AE_ERROR);
1458 			}
1459 
1460 			*rint = CompressEisaID(stringData);
1461 			AcpiOsFree(rv);
1462 			return (AE_OK);
1463 		} else
1464 			AcpiOsFree(rv);
1465 	}
1466 	return (AE_ERROR);
1467 }
1468 
1469 /*
1470  * Create linkage between devinfo nodes and ACPI nodes
1471  */
1472 ACPI_STATUS
1473 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1474 {
1475 	ACPI_STATUS status;
1476 	ACPI_BUFFER rb;
1477 
1478 	/*
1479 	 * Tag the devinfo node with the ACPI name
1480 	 */
1481 	rb.Pointer = NULL;
1482 	rb.Length = ACPI_ALLOCATE_BUFFER;
1483 	status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1484 	if (ACPI_FAILURE(status)) {
1485 		cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1486 	} else {
1487 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1488 		    "acpi-namespace", (char *)rb.Pointer);
1489 		AcpiOsFree(rb.Pointer);
1490 
1491 		/*
1492 		 * Tag the ACPI node with the dip
1493 		 */
1494 		status = acpica_set_devinfo(acpiobj, dip);
1495 		ASSERT(ACPI_SUCCESS(status));
1496 	}
1497 
1498 	return (status);
1499 }
1500 
1501 /*
1502  * Destroy linkage between devinfo nodes and ACPI nodes
1503  */
1504 ACPI_STATUS
1505 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1506 {
1507 	(void) acpica_unset_devinfo(acpiobj);
1508 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1509 
1510 	return (AE_OK);
1511 }
1512 
1513 /*
1514  * Return the ACPI device node matching the CPU dev_info node.
1515  */
1516 ACPI_STATUS
1517 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1518 {
1519 	int i;
1520 
1521 	/*
1522 	 * if cpu_map itself is NULL, we're a uppc system and
1523 	 * acpica_build_processor_map() hasn't been called yet.
1524 	 * So call it here
1525 	 */
1526 	if (cpu_map == NULL) {
1527 		(void) acpica_build_processor_map();
1528 		if (cpu_map == NULL)
1529 			return (AE_ERROR);
1530 	}
1531 
1532 	if (cpu_id < 0) {
1533 		return (AE_ERROR);
1534 	}
1535 
1536 	/*
1537 	 * search object with cpuid in cpu_map
1538 	 */
1539 	mutex_enter(&cpu_map_lock);
1540 	for (i = 0; i < cpu_map_count; i++) {
1541 		if (cpu_map[i]->cpu_id == cpu_id) {
1542 			break;
1543 		}
1544 	}
1545 	if (i >= cpu_map_count || (cpu_map[i]->obj == NULL)) {
1546 		mutex_exit(&cpu_map_lock);
1547 		return (AE_ERROR);
1548 	}
1549 	*rh = cpu_map[cpu_id]->obj;
1550 	mutex_exit(&cpu_map_lock);
1551 
1552 	return (AE_OK);
1553 }
1554 
1555 /*
1556  * Determine if this object is a processor
1557  */
1558 static ACPI_STATUS
1559 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1560 {
1561 	ACPI_STATUS status;
1562 	ACPI_OBJECT_TYPE objtype;
1563 	unsigned long acpi_id;
1564 	ACPI_BUFFER rb;
1565 
1566 	if (AcpiGetType(obj, &objtype) != AE_OK)
1567 		return (AE_OK);
1568 
1569 	if (objtype == ACPI_TYPE_PROCESSOR) {
1570 		/* process a Processor */
1571 		rb.Pointer = NULL;
1572 		rb.Length = ACPI_ALLOCATE_BUFFER;
1573 		status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1574 		    ACPI_TYPE_PROCESSOR);
1575 		if (status != AE_OK) {
1576 			cmn_err(CE_WARN, "!acpica: error probing Processor");
1577 			return (status);
1578 		}
1579 		acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1580 		AcpiOsFree(rb.Pointer);
1581 	} else if (objtype == ACPI_TYPE_DEVICE) {
1582 		/* process a processor Device */
1583 		rb.Pointer = NULL;
1584 		rb.Length = ACPI_ALLOCATE_BUFFER;
1585 		status = AcpiGetObjectInfo(obj, &rb);
1586 		if (status != AE_OK) {
1587 			cmn_err(CE_WARN,
1588 			    "!acpica: error probing Processor Device\n");
1589 			return (status);
1590 		}
1591 		ASSERT(((ACPI_OBJECT *)rb.Pointer)->Type ==
1592 		    ACPI_TYPE_DEVICE);
1593 
1594 		if (ddi_strtoul(
1595 		    ((ACPI_DEVICE_INFO *)rb.Pointer)->UniqueId.Value,
1596 		    NULL, 10, &acpi_id) != 0) {
1597 			AcpiOsFree(rb.Pointer);
1598 			cmn_err(CE_WARN,
1599 			    "!acpica: error probing Processor Device _UID\n");
1600 			return (AE_ERROR);
1601 		}
1602 		AcpiOsFree(rb.Pointer);
1603 	}
1604 	(void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1605 
1606 	return (AE_OK);
1607 }
1608 
1609 static void
1610 scan_d2a_map(void)
1611 {
1612 	dev_info_t *dip, *cdip;
1613 	ACPI_HANDLE acpiobj;
1614 	char *device_type_prop;
1615 	int bus;
1616 	static int map_error = 0;
1617 
1618 	if (map_error)
1619 		return;
1620 
1621 	scanning_d2a_map = 1;
1622 
1623 	/*
1624 	 * Find all child-of-root PCI buses, and find their corresponding
1625 	 * ACPI child-of-root PCI nodes.  For each one, add to the
1626 	 * d2a table.
1627 	 */
1628 
1629 	for (dip = ddi_get_child(ddi_root_node());
1630 	    dip != NULL;
1631 	    dip = ddi_get_next_sibling(dip)) {
1632 
1633 		/* prune non-PCI nodes */
1634 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1635 		    DDI_PROP_DONTPASS,
1636 		    "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1637 			continue;
1638 
1639 		if ((strcmp("pci", device_type_prop) != 0) &&
1640 		    (strcmp("pciex", device_type_prop) != 0)) {
1641 			ddi_prop_free(device_type_prop);
1642 			continue;
1643 		}
1644 
1645 		ddi_prop_free(device_type_prop);
1646 
1647 		/*
1648 		 * To get bus number of dip, get first child and get its
1649 		 * bus number.  If NULL, just continue, because we don't
1650 		 * care about bus nodes with no children anyway.
1651 		 */
1652 		if ((cdip = ddi_get_child(dip)) == NULL)
1653 			continue;
1654 
1655 		if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1656 #ifdef D2ADEBUG
1657 			cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1658 #endif
1659 			map_error = 1;
1660 			scanning_d2a_map = 0;
1661 			d2a_done = 1;
1662 			return;
1663 		}
1664 
1665 		if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1666 #ifdef D2ADEBUG
1667 			cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1668 #endif
1669 			map_error = 1;
1670 			continue;
1671 		}
1672 
1673 		acpica_tag_devinfo(dip, acpiobj);
1674 
1675 		/* call recursively to enumerate subtrees */
1676 		scan_d2a_subtree(dip, acpiobj, bus);
1677 	}
1678 
1679 	scanning_d2a_map = 0;
1680 	d2a_done = 1;
1681 }
1682 
1683 /*
1684  * For all acpi child devices of acpiobj, find their matching
1685  * dip under "dip" argument.  (matching means "matches dev/fn").
1686  * bus is assumed to already be a match from caller, and is
1687  * used here only to record in the d2a entry.  Recurse if necessary.
1688  */
1689 static void
1690 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1691 {
1692 	int acpi_devfn, hid;
1693 	ACPI_HANDLE acld;
1694 	dev_info_t *dcld;
1695 	int dcld_b, dcld_d, dcld_f;
1696 	int dev, func;
1697 	char *device_type_prop;
1698 
1699 	acld = NULL;
1700 	while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1701 	    == AE_OK) {
1702 		/* get the dev/func we're looking for in the devinfo tree */
1703 		if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1704 			continue;
1705 		dev = (acpi_devfn >> 16) & 0xFFFF;
1706 		func = acpi_devfn & 0xFFFF;
1707 
1708 		/* look through all the immediate children of dip */
1709 		for (dcld = ddi_get_child(dip); dcld != NULL;
1710 		    dcld = ddi_get_next_sibling(dcld)) {
1711 			if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1712 				continue;
1713 
1714 			/* dev must match; function must match or wildcard */
1715 			if (dcld_d != dev ||
1716 			    (func != 0xFFFF && func != dcld_f))
1717 				continue;
1718 			bus = dcld_b;
1719 
1720 			/* found a match, record it */
1721 			acpica_tag_devinfo(dcld, acld);
1722 
1723 			/* if we find a bridge, recurse from here */
1724 			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1725 			    DDI_PROP_DONTPASS, "device_type",
1726 			    &device_type_prop) == DDI_PROP_SUCCESS) {
1727 				if ((strcmp("pci", device_type_prop) == 0) ||
1728 				    (strcmp("pciex", device_type_prop) == 0))
1729 					scan_d2a_subtree(dcld, acld, bus);
1730 				ddi_prop_free(device_type_prop);
1731 			}
1732 
1733 			/* done finding a match, so break now */
1734 			break;
1735 		}
1736 	}
1737 }
1738 
1739 /*
1740  * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1741  */
1742 int
1743 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1744 {
1745 	pci_regspec_t *pci_rp;
1746 	int len;
1747 
1748 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1749 	    "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1750 		return (-1);
1751 
1752 	if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1753 		ddi_prop_free(pci_rp);
1754 		return (-1);
1755 	}
1756 	if (bus != NULL)
1757 		*bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1758 	if (device != NULL)
1759 		*device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1760 	if (func != NULL)
1761 		*func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1762 	ddi_prop_free(pci_rp);
1763 	return (0);
1764 }
1765 
1766 /*
1767  * Return the ACPI device node matching this dev_info node, if it
1768  * exists in the ACPI tree.
1769  */
1770 ACPI_STATUS
1771 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1772 {
1773 	ACPI_STATUS status;
1774 	char *acpiname;
1775 
1776 	if (!d2a_done)
1777 		scan_d2a_map();
1778 
1779 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1780 	    "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1781 		return (AE_ERROR);
1782 	}
1783 
1784 	status = AcpiGetHandle(NULL, acpiname, rh);
1785 	ddi_prop_free((void *)acpiname);
1786 	return (status);
1787 }
1788 
1789 
1790 
1791 /*
1792  * Manage OS data attachment to ACPI nodes
1793  */
1794 
1795 /*
1796  * Return the (dev_info_t *) associated with the ACPI node.
1797  */
1798 ACPI_STATUS
1799 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1800 {
1801 	ACPI_STATUS status;
1802 	void *ptr;
1803 
1804 	status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1805 	if (status == AE_OK)
1806 		*dipp = (dev_info_t *)ptr;
1807 
1808 	return (status);
1809 }
1810 
1811 /*
1812  * Set the dev_info_t associated with the ACPI node.
1813  */
1814 static ACPI_STATUS
1815 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1816 {
1817 	ACPI_STATUS status;
1818 
1819 	status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1820 	return (status);
1821 }
1822 
1823 /*
1824  * Unset the dev_info_t associated with the ACPI node.
1825  */
1826 static ACPI_STATUS
1827 acpica_unset_devinfo(ACPI_HANDLE obj)
1828 {
1829 	return (AcpiDetachData(obj, acpica_devinfo_handler));
1830 }
1831 
1832 /*
1833  *
1834  */
1835 void
1836 acpica_devinfo_handler(ACPI_HANDLE obj, UINT32 func, void *data)
1837 {
1838 	/* noop */
1839 }
1840 
1841 ACPI_STATUS
1842 acpica_build_processor_map(void)
1843 {
1844 	ACPI_STATUS status;
1845 	void *rv;
1846 
1847 	/*
1848 	 * shouldn't be called more than once anyway
1849 	 */
1850 	if (cpu_map_built)
1851 		return (AE_OK);
1852 
1853 	/*
1854 	 * ACPI device configuration driver has built mapping information
1855 	 * among processor id and object handle, no need to probe again.
1856 	 */
1857 	if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1858 		cpu_map_built = 1;
1859 		return (AE_OK);
1860 	}
1861 
1862 	/*
1863 	 * Look for Processor objects
1864 	 */
1865 	status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1866 	    ACPI_ROOT_OBJECT,
1867 	    4,
1868 	    acpica_probe_processor,
1869 	    NULL,
1870 	    &rv);
1871 	ASSERT(status == AE_OK);
1872 
1873 	/*
1874 	 * Look for processor Device objects
1875 	 */
1876 	status = AcpiGetDevices("ACPI0007",
1877 	    acpica_probe_processor,
1878 	    NULL,
1879 	    &rv);
1880 	ASSERT(status == AE_OK);
1881 	cpu_map_built = 1;
1882 
1883 	return (status);
1884 }
1885 
1886 /*
1887  * Grow cpu map table on demand.
1888  */
1889 static void
1890 acpica_grow_cpu_map(void)
1891 {
1892 	if (cpu_map_count == cpu_map_count_max) {
1893 		size_t sz;
1894 		struct cpu_map_item **new_map;
1895 
1896 		ASSERT(cpu_map_count_max < INT_MAX / 2);
1897 		cpu_map_count_max += max_ncpus;
1898 		new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1899 		    KM_SLEEP);
1900 		if (cpu_map_count != 0) {
1901 			ASSERT(cpu_map != NULL);
1902 			sz = sizeof (cpu_map[0]) * cpu_map_count;
1903 			kcopy(cpu_map, new_map, sz);
1904 			kmem_free(cpu_map, sz);
1905 		}
1906 		cpu_map = new_map;
1907 	}
1908 }
1909 
1910 /*
1911  * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1912  * ACPI handle). The mapping table will be setup in two steps:
1913  * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1914  *    processor id and ACPI object handle.
1915  * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1916  * On system with ACPI device configuration for CPU enabled, acpica_map_cpu()
1917  * will be called before acpica_add_processor_to_map(), otherwise
1918  * acpica_map_cpu() will be called after acpica_add_processor_to_map().
1919  */
1920 ACPI_STATUS
1921 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
1922 {
1923 	int i;
1924 	ACPI_STATUS rc = AE_OK;
1925 	struct cpu_map_item *item = NULL;
1926 
1927 	ASSERT(obj != NULL);
1928 	if (obj == NULL) {
1929 		return (AE_ERROR);
1930 	}
1931 
1932 	mutex_enter(&cpu_map_lock);
1933 
1934 	/*
1935 	 * Special case for uppc
1936 	 * If we're a uppc system and ACPI device configuration for CPU has
1937 	 * been disabled, there won't be a CPU map yet because uppc psm doesn't
1938 	 * call acpica_map_cpu(). So create one and use the passed-in processor
1939 	 * as CPU 0
1940 	 */
1941 	if (cpu_map == NULL &&
1942 	    !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1943 		acpica_grow_cpu_map();
1944 		ASSERT(cpu_map != NULL);
1945 		item = kmem_zalloc(sizeof (*item), KM_SLEEP);
1946 		item->cpu_id = 0;
1947 		item->proc_id = acpi_id;
1948 		item->apic_id = apic_id;
1949 		item->obj = obj;
1950 		cpu_map[0] = item;
1951 		cpu_map_count = 1;
1952 		mutex_exit(&cpu_map_lock);
1953 		return (AE_OK);
1954 	}
1955 
1956 	for (i = 0; i < cpu_map_count; i++) {
1957 		if (cpu_map[i]->obj == obj) {
1958 			rc = AE_ALREADY_EXISTS;
1959 			break;
1960 		} else if (cpu_map[i]->proc_id == acpi_id) {
1961 			ASSERT(item == NULL);
1962 			item = cpu_map[i];
1963 		}
1964 	}
1965 
1966 	if (rc == AE_OK) {
1967 		if (item != NULL) {
1968 			/*
1969 			 * ACPI alias objects may cause more than one objects
1970 			 * with the same ACPI processor id, only remember the
1971 			 * the first object encountered.
1972 			 */
1973 			if (item->obj == NULL) {
1974 				item->obj = obj;
1975 				item->apic_id = apic_id;
1976 			} else {
1977 				rc = AE_ALREADY_EXISTS;
1978 			}
1979 		} else if (cpu_map_count >= INT_MAX / 2) {
1980 			rc = AE_NO_MEMORY;
1981 		} else {
1982 			acpica_grow_cpu_map();
1983 			ASSERT(cpu_map != NULL);
1984 			ASSERT(cpu_map_count < cpu_map_count_max);
1985 			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
1986 			item->cpu_id = -1;
1987 			item->proc_id = acpi_id;
1988 			item->apic_id = apic_id;
1989 			item->obj = obj;
1990 			cpu_map[cpu_map_count] = item;
1991 			cpu_map_count++;
1992 		}
1993 	}
1994 
1995 	mutex_exit(&cpu_map_lock);
1996 
1997 	return (rc);
1998 }
1999 
2000 ACPI_STATUS
2001 acpica_remove_processor_from_map(UINT32 acpi_id)
2002 {
2003 	int i;
2004 	ACPI_STATUS rc = AE_NOT_EXIST;
2005 
2006 	mutex_enter(&cpu_map_lock);
2007 	for (i = 0; i < cpu_map_count; i++) {
2008 		if (cpu_map[i]->proc_id != acpi_id) {
2009 			continue;
2010 		}
2011 		cpu_map[i]->obj = NULL;
2012 		/* Free item if no more reference to it. */
2013 		if (cpu_map[i]->cpu_id == -1) {
2014 			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2015 			cpu_map[i] = NULL;
2016 			cpu_map_count--;
2017 			if (i != cpu_map_count) {
2018 				cpu_map[i] = cpu_map[cpu_map_count];
2019 				cpu_map[cpu_map_count] = NULL;
2020 			}
2021 		}
2022 		rc = AE_OK;
2023 		break;
2024 	}
2025 	mutex_exit(&cpu_map_lock);
2026 
2027 	return (rc);
2028 }
2029 
2030 ACPI_STATUS
2031 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2032 {
2033 	int i;
2034 	ACPI_STATUS rc = AE_OK;
2035 	struct cpu_map_item *item = NULL;
2036 
2037 	ASSERT(cpuid != -1);
2038 	if (cpuid == -1) {
2039 		return (AE_ERROR);
2040 	}
2041 
2042 	mutex_enter(&cpu_map_lock);
2043 	for (i = 0; i < cpu_map_count; i++) {
2044 		if (cpu_map[i]->cpu_id == cpuid) {
2045 			rc = AE_ALREADY_EXISTS;
2046 			break;
2047 		} else if (cpu_map[i]->proc_id == acpi_id) {
2048 			ASSERT(item == NULL);
2049 			item = cpu_map[i];
2050 		}
2051 	}
2052 	if (rc == AE_OK) {
2053 		if (item != NULL) {
2054 			if (item->cpu_id == -1) {
2055 				item->cpu_id = cpuid;
2056 			} else {
2057 				rc = AE_ALREADY_EXISTS;
2058 			}
2059 		} else if (cpu_map_count >= INT_MAX / 2) {
2060 			rc = AE_NO_MEMORY;
2061 		} else {
2062 			acpica_grow_cpu_map();
2063 			ASSERT(cpu_map != NULL);
2064 			ASSERT(cpu_map_count < cpu_map_count_max);
2065 			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2066 			item->cpu_id = cpuid;
2067 			item->proc_id = acpi_id;
2068 			item->apic_id = UINT32_MAX;
2069 			item->obj = NULL;
2070 			cpu_map[cpu_map_count] = item;
2071 			cpu_map_count++;
2072 		}
2073 	}
2074 	mutex_exit(&cpu_map_lock);
2075 
2076 	return (rc);
2077 }
2078 
2079 ACPI_STATUS
2080 acpica_unmap_cpu(processorid_t cpuid)
2081 {
2082 	int i;
2083 	ACPI_STATUS rc = AE_NOT_EXIST;
2084 
2085 	ASSERT(cpuid != -1);
2086 	if (cpuid == -1) {
2087 		return (rc);
2088 	}
2089 
2090 	mutex_enter(&cpu_map_lock);
2091 	for (i = 0; i < cpu_map_count; i++) {
2092 		if (cpu_map[i]->cpu_id != cpuid) {
2093 			continue;
2094 		}
2095 		cpu_map[i]->cpu_id = -1;
2096 		/* Free item if no more reference. */
2097 		if (cpu_map[i]->obj == NULL) {
2098 			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2099 			cpu_map[i] = NULL;
2100 			cpu_map_count--;
2101 			if (i != cpu_map_count) {
2102 				cpu_map[i] = cpu_map[cpu_map_count];
2103 				cpu_map[cpu_map_count] = NULL;
2104 			}
2105 		}
2106 		rc = AE_OK;
2107 		break;
2108 	}
2109 	mutex_exit(&cpu_map_lock);
2110 
2111 	return (rc);
2112 }
2113 
2114 ACPI_STATUS
2115 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2116 {
2117 	int i;
2118 	ACPI_STATUS rc = AE_NOT_EXIST;
2119 
2120 	ASSERT(cpuid != -1);
2121 	if (cpuid == -1) {
2122 		return (rc);
2123 	}
2124 
2125 	mutex_enter(&cpu_map_lock);
2126 	for (i = 0; i < cpu_map_count; i++) {
2127 		if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2128 			*hdlp = cpu_map[i]->obj;
2129 			rc = AE_OK;
2130 			break;
2131 		}
2132 	}
2133 	mutex_exit(&cpu_map_lock);
2134 
2135 	return (rc);
2136 }
2137 
2138 ACPI_STATUS
2139 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2140 {
2141 	int i;
2142 	ACPI_STATUS rc = AE_NOT_EXIST;
2143 
2144 	mutex_enter(&cpu_map_lock);
2145 	for (i = 0; i < cpu_map_count; i++) {
2146 		if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2147 			*hdlp = cpu_map[i]->obj;
2148 			rc = AE_OK;
2149 			break;
2150 		}
2151 	}
2152 	mutex_exit(&cpu_map_lock);
2153 
2154 	return (rc);
2155 }
2156 
2157 ACPI_STATUS
2158 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2159 {
2160 	int i;
2161 	ACPI_STATUS rc = AE_NOT_EXIST;
2162 
2163 	ASSERT(apicid != UINT32_MAX);
2164 	if (apicid == UINT32_MAX) {
2165 		return (rc);
2166 	}
2167 
2168 	mutex_enter(&cpu_map_lock);
2169 	for (i = 0; i < cpu_map_count; i++) {
2170 		if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2171 			*hdlp = cpu_map[i]->obj;
2172 			rc = AE_OK;
2173 			break;
2174 		}
2175 	}
2176 	mutex_exit(&cpu_map_lock);
2177 
2178 	return (rc);
2179 }
2180 
2181 void
2182 acpica_set_core_feature(uint64_t features)
2183 {
2184 	atomic_or_64(&acpica_core_features, features);
2185 }
2186 
2187 void
2188 acpica_clear_core_feature(uint64_t features)
2189 {
2190 	atomic_and_64(&acpica_core_features, ~features);
2191 }
2192 
2193 uint64_t
2194 acpica_get_core_feature(uint64_t features)
2195 {
2196 	return (acpica_core_features & features);
2197 }
2198 
2199 void
2200 acpica_set_devcfg_feature(uint64_t features)
2201 {
2202 	atomic_or_64(&acpica_devcfg_features, features);
2203 }
2204 
2205 void
2206 acpica_clear_devcfg_feature(uint64_t features)
2207 {
2208 	atomic_and_64(&acpica_devcfg_features, ~features);
2209 }
2210 
2211 uint64_t
2212 acpica_get_devcfg_feature(uint64_t features)
2213 {
2214 	return (acpica_devcfg_features & features);
2215 }
2216 
2217 void
2218 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2219 {
2220 	*gbl_FADT = &AcpiGbl_FADT;
2221 }
2222