/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */


#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/list.h>
#include <sys/pci.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>
#include <sys/sunndi.h>
#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/bootconf.h>
#include <sys/int_fmtio.h>
#include <sys/smbios.h>
#include <sys/apic.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/immu.h>
#include <sys/smp_impldefs.h>

static void dmar_table_destroy(dmar_table_t *tbl);

/*
 * internal global variables
 */
static char	*dmar_raw;		/* raw DMAR ACPI table */
static dmar_table_t *dmar_table;	/* converted form of DMAR table */

/*
 * global variables exported outside this file
 */
boolean_t dmar_print = B_FALSE;
kmutex_t ioapic_drhd_lock;
list_t ioapic_drhd_list;

/* ######################################################################### */

/*
 * helper functions to read the "raw" DMAR table
 */

static uint8_t
get_uint8(char *cp)
{
	uint8_t val = *((uint8_t *)cp);
	return (val);
}

static uint16_t
get_uint16(char *cp)
{
	uint16_t val = *((uint16_t *)cp);
	return (val);
}

static uint32_t
get_uint32(char *cp)
{
	uint32_t val = *((uint32_t *)cp);
	return (val);
}

static uint64_t
get_uint64(char *cp)
{
	uint64_t val = *((uint64_t *)cp);
	return (val);
}

static char *
get_str(char *cp, uint_t len)
{
	char *str = kmem_alloc(len + 1, KM_SLEEP);

	(void) strlcpy(str, cp, len + 1);

	return (str);
}

static void
scope_list_free(list_t *scope_list)
{
	scope_t *scope;

	if (list_is_empty(scope_list)) {
		list_destroy(scope_list);
		return;
	}

	while ((scope = list_remove_head(scope_list)) != NULL) {
		kmem_free(scope, sizeof (scope_t));
	}

	ASSERT(list_is_empty(scope_list));
	list_destroy(scope_list);
}

static void
drhd_list_destroy(list_t *drhd_list)
{
	drhd_t *drhd;

	ASSERT(drhd_list);

	if (list_is_empty(drhd_list)) {
		list_destroy(drhd_list);
		return;
	}

	while ((drhd = list_remove_head(drhd_list)) != NULL) {
		scope_list_free(&(drhd->dr_scope_list));
		kmem_free(drhd, sizeof (drhd_t));
	}

	ASSERT(list_is_empty(drhd_list));
	list_destroy(drhd_list);
}

static void
rmrr_list_destroy(list_t *rmrr_list)
{
	rmrr_t *rmrr;

	ASSERT(rmrr_list);

	if (list_is_empty(rmrr_list)) {
		list_destroy(rmrr_list);
		return;
	}

	while ((rmrr = list_remove_head(rmrr_list)) != NULL) {
		scope_list_free(&(rmrr->rm_scope_list));
		kmem_free(rmrr, sizeof (rmrr_t));
	}

	ASSERT(list_is_empty(rmrr_list));
	list_destroy(rmrr_list);
}

/*
 * parse_scope()
 *      parse a scope structure in the "raw" table
 */
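/*
 * Offsets below follow the DMAR Device Scope Structure layout defined
 * by the Intel VT-d specification:
 *	byte 0:     type (endpoint, sub-hierarchy, IOAPIC, HPET)
 *	byte 1:     length of this scope structure
 *	bytes 2-3:  reserved
 *	byte 4:     enumeration ID (e.g. IOAPIC ID)
 *	byte 5:     start bus number
 *	byte 6...:  path as (device, function) byte pairs
 */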
static scope_t *
parse_scope(char *shead)
{
	scope_t *scope;
	char *phead;
	int bus, dev, func;
	uint8_t startbus;
	uint8_t len;
	int depth;

	ASSERT(shead);

	scope = kmem_zalloc(sizeof (scope_t), KM_SLEEP);
	scope->scp_type = get_uint8(&shead[0]);
	scope->scp_enumid = get_uint8(&shead[4]);

	len = get_uint8(&shead[1]);
	startbus = get_uint8(&shead[5]);
	depth = (len - 6)/2;
	ASSERT(depth >= 1);

	phead = &shead[6];

	bus = startbus;
	dev = get_uint8(phead++);
	func = get_uint8(phead++);

	for (depth--; depth > 0; depth--) {
		bus = pci_getb_func(bus, dev, func, PCI_BCNF_SECBUS);
		dev = get_uint8(phead++);
		func = get_uint8(phead++);
	}

	ASSERT(bus >= 0 && bus < 256);
	ASSERT(dev >= 0 && dev < 32);
	ASSERT(func >= 0 && func < 8);

	/* ok we got the device BDF */
	scope->scp_bus = bus;
	scope->scp_dev = dev;
	scope->scp_func = func;

	return (scope);
}


/* setup the ioapic_drhd structure */
static void
ioapic_drhd_setup(void)
{
	mutex_init(&(ioapic_drhd_lock), NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&(ioapic_drhd_lock));
	list_create(&(ioapic_drhd_list), sizeof (ioapic_drhd_t),
	    offsetof(ioapic_drhd_t, ioapic_node));
	mutex_exit(&(ioapic_drhd_lock));
}

/* get ioapic source id for interrupt remapping */
static void
ioapic_drhd_insert(scope_t *scope, drhd_t *drhd)
{
	ioapic_drhd_t *idt;

	idt = kmem_zalloc(sizeof (ioapic_drhd_t), KM_SLEEP);
	idt->ioapic_ioapicid = scope->scp_enumid;
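	/* source-id is the PCI requester ID: bus[15:8], dev[7:3], func[2:0] */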
	idt->ioapic_sid = ((scope->scp_bus << 8) | (scope->scp_dev << 3) |
	    (scope->scp_func));
	idt->ioapic_drhd = drhd;

	mutex_enter(&ioapic_drhd_lock);
	list_insert_tail(&ioapic_drhd_list, idt);
	mutex_exit(&ioapic_drhd_lock);
}

static ioapic_drhd_t *
ioapic_drhd_lookup(int ioapicid)
{
	ioapic_drhd_t *idt;

	mutex_enter(&ioapic_drhd_lock);
	idt = list_head(&ioapic_drhd_list);
	for (; idt; idt = list_next(&ioapic_drhd_list, idt)) {
		if (idt->ioapic_ioapicid == ioapicid) {
			break;
		}
	}
	mutex_exit(&ioapic_drhd_lock);

	return (idt);
}

static void
ioapic_drhd_destroy(void)
{
	ioapic_drhd_t *idt;

	mutex_enter(&ioapic_drhd_lock);
	while ((idt = list_remove_head(&ioapic_drhd_list)) != NULL) {
		kmem_free(idt, sizeof (ioapic_drhd_t));
	}
	list_destroy(&ioapic_drhd_list);
	mutex_exit(&(ioapic_drhd_lock));

	mutex_destroy(&(ioapic_drhd_lock));
}

/*
 * parse_drhd()
 *   parse the DRHD units in the DMAR table
 */
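/*
 * DRHD unit layout (per the VT-d spec) assumed by the offsets below:
 *	bytes 0-1:   type (DMAR_DRHD)
 *	bytes 2-3:   length of this unit
 *	byte 4:      flags (bit 0 = INCLUDE_ALL)
 *	bytes 6-7:   PCI segment number
 *	bytes 8-15:  register base address
 *	byte 16...:  device scope structures
 */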
static int
parse_drhd(char *uhead, dmar_table_t *tbl)
{
	drhd_t *drhd;
	int seg;
	int len;
	char *shead;
	scope_t *scope;

	ASSERT(uhead);
	ASSERT(tbl);
	ASSERT(get_uint16(&uhead[0]) == DMAR_DRHD);

	seg = get_uint16(&uhead[6]);
	if (seg < 0 || seg >= IMMU_MAXSEG) {
		ddi_err(DER_WARN, NULL, "invalid segment# <%d> "
		    "in DRHD unit in ACPI DMAR table", seg);
		return (DDI_FAILURE);
	}

	drhd = kmem_zalloc(sizeof (drhd_t), KM_SLEEP);
	mutex_init(&(drhd->dr_lock), NULL, MUTEX_DEFAULT, NULL);
	list_create(&(drhd->dr_scope_list), sizeof (scope_t),
	    offsetof(scope_t, scp_node));

	len = get_uint16(&uhead[2]);
	drhd->dr_include_all =
	    (get_uint8(&uhead[4]) & DMAR_INCLUDE_ALL) ? B_TRUE : B_FALSE;
	drhd->dr_seg = seg;
	drhd->dr_regs = get_uint64(&uhead[8]);

	/*
	 * parse each scope.
	 */
	shead = &uhead[16];
	while (shead < &uhead[len - 1]) {
		scope = parse_scope(shead);
		if (scope == NULL) {
			return (DDI_FAILURE);
		}

		if (scope->scp_type == DMAR_IOAPIC) {
			ioapic_drhd_insert(scope, drhd);
		}

		list_insert_tail(&(drhd->dr_scope_list), scope);
		shead += get_uint8(&shead[1]);
	}

	list_insert_tail(&(tbl->tbl_drhd_list[drhd->dr_seg]), drhd);

	return (DDI_SUCCESS);
}

/*
 * parse_rmrr()
 *   parse the RMRR units in the DMAR table
 */
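/*
 * RMRR unit layout (per the VT-d spec) assumed by the offsets below:
 *	bytes 0-1:   type (DMAR_RMRR)
 *	bytes 2-3:   length of this unit
 *	bytes 6-7:   PCI segment number
 *	bytes 8-15:  region base address
 *	bytes 16-23: region limit address
 *	byte 24...:  device scope structures
 */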
static int
parse_rmrr(char *uhead, dmar_table_t *tbl)
{
	rmrr_t *rmrr;
	int seg;
	int len;
	char *shead;
	scope_t *scope;

	ASSERT(uhead);
	ASSERT(tbl);
	ASSERT(get_uint16(&uhead[0]) == DMAR_RMRR);

	seg = get_uint16(&uhead[6]);
	if (seg < 0 || seg >= IMMU_MAXSEG) {
		ddi_err(DER_WARN, NULL, "invalid segment# <%d> "
		    "in RMRR unit in ACPI DMAR table", seg);
		return (DDI_FAILURE);
	}

	rmrr = kmem_zalloc(sizeof (rmrr_t), KM_SLEEP);
	mutex_init(&(rmrr->rm_lock), NULL, MUTEX_DEFAULT, NULL);
	list_create(&(rmrr->rm_scope_list), sizeof (scope_t),
	    offsetof(scope_t, scp_node));

	/* RMRR region is [base,limit] */
	len = get_uint16(&uhead[2]);
	rmrr->rm_seg = get_uint16(&uhead[6]);
	rmrr->rm_base = get_uint64(&uhead[8]);
	rmrr->rm_limit = get_uint64(&uhead[16]);

	if (rmrr->rm_base > rmrr->rm_limit) {
		ddi_err(DER_WARN, NULL, "IMMU: BIOS bug detected: "
		    "RMRR: base (%lx) > limit (%lx)",
		    rmrr->rm_base, rmrr->rm_limit);
		list_destroy(&(rmrr->rm_scope_list));
		mutex_destroy(&(rmrr->rm_lock));
		kmem_free(rmrr, sizeof (rmrr_t));
		return (DDI_SUCCESS);
	}

	/*
	 * parse each scope in RMRR
	 */
	shead = &uhead[24];
	while (shead < &uhead[len - 1]) {
		scope = parse_scope(shead);
		if (scope == NULL) {
			return (DDI_FAILURE);
		}
		list_insert_tail(&(rmrr->rm_scope_list), scope);
		shead += get_uint8(&shead[1]);
	}

	list_insert_tail(&(tbl->tbl_rmrr_list[rmrr->rm_seg]), rmrr);

	return (DDI_SUCCESS);
}

#define	TBL_OEM_ID_SZ		(6)
#define	TBL_OEM_TBLID_SZ	(8)

/*
 * parse the "raw" DMAR table and convert it
 * into a useful form.
 */
static int
dmar_parse(dmar_table_t **tblpp, char *raw)
{
	char *uhead;
	dmar_table_t *tbl;
	int i;
	char *unmstr;

	ASSERT(raw);
	ASSERT(tblpp);

	*tblpp = NULL;

	/*
	 * do a sanity check. make sure the raw table
	 * has the right signature
	 */
	if (raw[0] != 'D' || raw[1] != 'M' ||
	    raw[2] != 'A' || raw[3] != 'R') {
		ddi_err(DER_WARN, NULL, "IOMMU ACPI "
		    "signature != \"DMAR\"");
		return (DDI_FAILURE);
	}

	/*
	 * the platform has intel iommu, create processed ACPI struct
	 */
	tbl = kmem_zalloc(sizeof (dmar_table_t), KM_SLEEP);
	mutex_init(&(tbl->tbl_lock), NULL, MUTEX_DEFAULT, NULL);

	tbl->tbl_raw = raw;

	/*
	 * Note we explicitly show offsets for clarity
	 */
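	/*
	 * The offsets below are from the standard ACPI table header
	 * followed by the DMAR-specific fields:
	 *	bytes 4-7:   table length
	 *	bytes 10-15: OEM ID
	 *	bytes 16-23: OEM table ID
	 *	bytes 24-27: OEM revision
	 *	byte 36:     host address width (encoded as width - 1)
	 *	byte 37:     flags (bit 0 = interrupt remapping support)
	 *	byte 48...:  remapping structures (DRHD, RMRR, ...)
	 */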
	tbl->tbl_rawlen = get_uint32(&raw[4]);

	/* XXX TO DO verify checksum of table */
	tbl->tbl_oem_id = get_str(&raw[10], TBL_OEM_ID_SZ);
	tbl->tbl_oem_tblid = get_str(&raw[16], TBL_OEM_TBLID_SZ);
	tbl->tbl_oem_rev = get_uint32(&raw[24]);
	tbl->tbl_haw = get_uint8(&raw[36]) + 1;
	tbl->tbl_intrmap = (get_uint8(&raw[37]) & DMAR_INTRMAP_SUPPORT)
	    ? B_TRUE : B_FALSE;

	/* create lists for DRHD and RMRR */
	for (i = 0; i < IMMU_MAXSEG; i++) {
		list_create(&(tbl->tbl_drhd_list[i]), sizeof (drhd_t),
		    offsetof(drhd_t, dr_node));
		list_create(&(tbl->tbl_rmrr_list[i]), sizeof (rmrr_t),
		    offsetof(rmrr_t, rm_node));
	}

	ioapic_drhd_setup();

	/*
	 * parse each unit. Currently only DRHD and RMRR types
	 * are parsed. We ignore all other types of units.
	 */
	uhead = &raw[48];
	while (uhead < &raw[tbl->tbl_rawlen - 1]) {
		unmstr = NULL;
		switch (get_uint16(uhead)) {
		case DMAR_DRHD:
			if (parse_drhd(uhead, tbl) != DDI_SUCCESS) {
				goto failed;
			}
			break;
		case DMAR_RMRR:
			if (parse_rmrr(uhead, tbl) != DDI_SUCCESS) {
				goto failed;
			}
			break;
		case DMAR_ATSR:
			unmstr = "ATSR";
			break;
		case DMAR_RHSA:
			unmstr = "RHSA";
			break;
		default:
			unmstr = "unknown unit type";
			break;
		}
		if (unmstr) {
			ddi_err(DER_NOTE, NULL, "DMAR ACPI table: "
			    "skipping unsupported unit type %s", unmstr);
		}
		uhead += get_uint16(&uhead[2]);
	}

	*tblpp = tbl;
	return (DDI_SUCCESS);

failed:
	dmar_table_destroy(tbl);
	return (DDI_FAILURE);
}

static char *
scope_type(int devtype)
{
	char *typestr;

	switch (devtype) {
	case DMAR_ENDPOINT:
		typestr = "endpoint-device";
		break;
	case DMAR_SUBTREE:
		typestr = "subtree-device";
		break;
	case DMAR_IOAPIC:
		typestr = "IOAPIC";
		break;
	case DMAR_HPET:
		typestr = "HPET";
		break;
	default:
		typestr = "Unknown device";
		break;
	}

	return (typestr);
}

static void
print_scope_list(list_t *scope_list)
{
	scope_t *scope;

	if (list_is_empty(scope_list))
		return;

	ddi_err(DER_CONT, NULL, "\tdevice list:\n");

	for (scope = list_head(scope_list); scope;
	    scope = list_next(scope_list, scope)) {
		ddi_err(DER_CONT, NULL, "\t\ttype = %s\n",
		    scope_type(scope->scp_type));
		ddi_err(DER_CONT, NULL, "\n\t\tbus = %d\n",
		    scope->scp_bus);
		ddi_err(DER_CONT, NULL, "\t\tdev = %d\n",
		    scope->scp_dev);
		ddi_err(DER_CONT, NULL, "\t\tfunc = %d\n",
		    scope->scp_func);
	}
}

static void
print_drhd_list(list_t *drhd_list)
{
	drhd_t *drhd;

	if (list_is_empty(drhd_list))
		return;

	ddi_err(DER_CONT, NULL, "\ndrhd list:\n");

	for (drhd = list_head(drhd_list); drhd;
	    drhd = list_next(drhd_list, drhd)) {

		ddi_err(DER_CONT, NULL, "\n\tsegment = %d\n",
		    drhd->dr_seg);
		ddi_err(DER_CONT, NULL, "\treg_base = 0x%" PRIx64 "\n",
		    drhd->dr_regs);
		ddi_err(DER_CONT, NULL, "\tinclude_all = %s\n",
		    drhd->dr_include_all == B_TRUE ? "TRUE" : "FALSE");
		ddi_err(DER_CONT, NULL, "\tdip = 0x%p\n",
		    (void *)drhd->dr_dip);

		print_scope_list(&(drhd->dr_scope_list));
	}
}


static void
print_rmrr_list(list_t *rmrr_list)
{
	rmrr_t *rmrr;

	if (list_is_empty(rmrr_list))
		return;

	ddi_err(DER_CONT, NULL, "\nrmrr list:\n");

	for (rmrr = list_head(rmrr_list); rmrr;
	    rmrr = list_next(rmrr_list, rmrr)) {

		ddi_err(DER_CONT, NULL, "\n\tsegment = %d\n",
		    rmrr->rm_seg);
		ddi_err(DER_CONT, NULL, "\tbase = 0x%lx\n",
		    rmrr->rm_base);
		ddi_err(DER_CONT, NULL, "\tlimit = 0x%lx\n",
		    rmrr->rm_limit);

		print_scope_list(&(rmrr->rm_scope_list));
	}
}

607
608/*
609 * print DMAR table
610 */
611static void
612dmar_table_print(dmar_table_t *tbl)
613{
614	int i;
615
616	if (dmar_print == B_FALSE) {
617		return;
618	}
619
620	/* print the title */
621	ddi_err(DER_CONT, NULL, "#### Start of dmar_table ####\n");
622	ddi_err(DER_CONT, NULL, "\thaw = %d\n", tbl->tbl_haw);
623	ddi_err(DER_CONT, NULL, "\tintr_remap = %s\n",
624	    tbl->tbl_intrmap == B_TRUE ? "<true>" : "<false>");
625
626	/* print drhd list */
627	for (i = 0; i < IMMU_MAXSEG; i++) {
628		print_drhd_list(&(tbl->tbl_drhd_list[i]));
629	}
630
631
632	/* print rmrr list */
633	for (i = 0; i < IMMU_MAXSEG; i++) {
634		print_rmrr_list(&(tbl->tbl_rmrr_list[i]));
635	}
636
637	ddi_err(DER_CONT, NULL, "#### END of dmar_table ####\n");
638}

static void
drhd_devi_create(drhd_t *drhd, int unit)
{
	struct ddi_parent_private_data *pdptr;
	struct regspec reg;
	dev_info_t *dip;

	dip = ddi_add_child(root_devinfo, IMMU_UNIT_NAME,
	    DEVI_SID_NODEID, unit);

	drhd->dr_dip = dip;

	reg.regspec_bustype = 0;
	reg.regspec_addr = drhd->dr_regs;
	reg.regspec_size = IMMU_REGSZ;

	/*
	 * update the reg properties
	 *
	 *   reg property will be used for register
	 *   set access
	 *
	 * refer to the bus_map of root nexus driver
	 * I/O or memory mapping:
	 *
	 * <bustype=0, addr=x, len=x>: memory
	 * <bustype=1, addr=x, len=x>: i/o
	 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
	    dip, "reg", (int *)&reg,
	    sizeof (struct regspec) / sizeof (int));

	/*
	 * This is an artificially constructed dev_info, and we
	 * need to set a few more things to be able to use it
	 * for ddi_dma_alloc_handle/free_handle.
	 */
	ddi_set_driver(dip, ddi_get_driver(ddi_root_node()));
	DEVI(dip)->devi_bus_dma_allochdl =
	    DEVI(ddi_get_driver((ddi_root_node())));

	pdptr = kmem_zalloc(sizeof (struct ddi_parent_private_data)
	    + sizeof (struct regspec), KM_SLEEP);
	pdptr->par_nreg = 1;
	pdptr->par_reg = (struct regspec *)(pdptr + 1);
	pdptr->par_reg->regspec_bustype = 0;
	pdptr->par_reg->regspec_addr = drhd->dr_regs;
	pdptr->par_reg->regspec_size = IMMU_REGSZ;
	ddi_set_parent_data(dip, pdptr);
}

/*
 * dmar_devinfos_create()
 *
 *   create a dev_info node in the device tree for
 *   each DRHD unit; each node is a child of the
 *   root nexus
 */
static void
dmar_devinfos_create(dmar_table_t *tbl)
{
	list_t *drhd_list;
	drhd_t *drhd;
	int i, unit;

	for (i = 0; i < IMMU_MAXSEG; i++) {

		drhd_list = &(tbl->tbl_drhd_list[i]);

		if (list_is_empty(drhd_list))
			continue;

		drhd = list_head(drhd_list);
		for (unit = 0; drhd;
		    drhd = list_next(drhd_list, drhd), unit++) {
			drhd_devi_create(drhd, unit);
		}
	}
}

static void
drhd_devi_destroy(drhd_t *drhd)
{
	dev_info_t *dip;
	int count;

	dip = drhd->dr_dip;
	ASSERT(dip);

	ndi_devi_enter(root_devinfo, &count);
	if (ndi_devi_offline(dip, NDI_DEVI_REMOVE) != DDI_SUCCESS) {
		ddi_err(DER_WARN, dip, "Failed to destroy");
	}
	ndi_devi_exit(root_devinfo, count);
	drhd->dr_dip = NULL;
}

/*
 * dmar_devi_destroy()
 *
 * destroy dev_info nodes for all drhd units
 */
static void
dmar_devi_destroy(dmar_table_t *tbl)
{
	drhd_t *drhd;
	list_t *drhd_list;
	int i;

	for (i = 0; i < IMMU_MAXSEG; i++) {
		drhd_list = &(tbl->tbl_drhd_list[i]);
		if (list_is_empty(drhd_list))
			continue;

		drhd = list_head(drhd_list);
		for (; drhd; drhd = list_next(drhd_list, drhd)) {
			drhd_devi_destroy(drhd);
		}
	}
}

static int
match_bdf(dev_info_t *ddip, void *arg)
{
	immu_arg_t *imarg = (immu_arg_t *)arg;
	immu_devi_t *immu_devi;

	ASSERT(ddip);
	ASSERT(imarg);
	ASSERT(imarg->ima_seg == 0);
	ASSERT(imarg->ima_bus >= 0);
	ASSERT(imarg->ima_devfunc >= 0);
	ASSERT(imarg->ima_ddip == NULL);

	/* rdip can be NULL */

	mutex_enter(&(DEVI(ddip)->devi_lock));

	immu_devi = IMMU_DEVI(ddip);
	ASSERT(immu_devi);

	if (immu_devi->imd_seg == imarg->ima_seg &&
	    immu_devi->imd_bus == imarg->ima_bus &&
	    immu_devi->imd_devfunc == imarg->ima_devfunc) {
		imarg->ima_ddip = ddip;
	}

	mutex_exit(&(DEVI(ddip)->devi_lock));

	return (imarg->ima_ddip ? DDI_WALK_TERMINATE : DDI_WALK_CONTINUE);
}

static void
dmar_table_destroy(dmar_table_t *tbl)
{
	int i;

	ASSERT(tbl);

	/* destroy lists for DRHD and RMRR */
	for (i = 0; i < IMMU_MAXSEG; i++) {
		rmrr_list_destroy(&(tbl->tbl_rmrr_list[i]));
		drhd_list_destroy(&(tbl->tbl_drhd_list[i]));
	}

	/* free strings */
	kmem_free(tbl->tbl_oem_tblid, TBL_OEM_TBLID_SZ + 1);
	kmem_free(tbl->tbl_oem_id, TBL_OEM_ID_SZ + 1);
	tbl->tbl_raw = NULL; /* raw ACPI table doesn't have to be freed */
	mutex_destroy(&(tbl->tbl_lock));
	kmem_free(tbl, sizeof (dmar_table_t));
}

/*
 * #########################################################################
 * Functions exported by dmar.c
 * This file deals with reading and processing the DMAR ACPI table
 * #########################################################################
 */

/*
 * immu_dmar_setup()
 *	Check if the system has a DMAR ACPI table. If yes, the system
 *	has Intel IOMMU hardware
 */
int
immu_dmar_setup(void)
{
	if (AcpiGetTable("DMAR", 1, (ACPI_TABLE_HEADER **)&dmar_raw) != AE_OK) {
		ddi_err(DER_LOG, NULL,
		    "No DMAR ACPI table. No Intel IOMMU present\n");
		dmar_raw = NULL;
		return (DDI_FAILURE);
	}
	ASSERT(dmar_raw);
	return (DDI_SUCCESS);
}

/*
 * immu_dmar_parse()
 *  Called by immu.c to parse and convert "raw" ACPI DMAR table
 */
int
immu_dmar_parse(void)
{
	dmar_table_t *tbl = NULL;

	/* we should already have found the "raw" table */
	ASSERT(dmar_raw);

	ddi_err(DER_CONT, NULL, "?Processing DMAR ACPI table\n");

	dmar_table = NULL;

	/*
	 * parse DMAR ACPI table
	 */
	if (dmar_parse(&tbl, dmar_raw) != DDI_SUCCESS) {
		ASSERT(tbl == NULL);
		return (DDI_FAILURE);
	}

	ASSERT(tbl);

	/*
	 * create one devinfo for every drhd unit
	 * in the DMAR table
	 */
	dmar_devinfos_create(tbl);

	/*
	 * print the dmar table if the debug option is set
	 */
	dmar_table_print(tbl);

	dmar_table = tbl;

	return (DDI_SUCCESS);
}

void
immu_dmar_startup(void)
{
	/* nothing to do */
}

void
immu_dmar_shutdown(void)
{
	/* nothing to do */
}

void
immu_dmar_destroy(void)
{
	dmar_devi_destroy(dmar_table);
	dmar_table_destroy(dmar_table);
	ioapic_drhd_destroy();
	dmar_table = NULL;
	dmar_raw = NULL;
}

boolean_t
immu_dmar_blacklisted(char **strptr, uint_t nstrs)
{
	dmar_table_t *tbl = dmar_table;
	int i;
	char oem_rev[IMMU_MAXNAMELEN];

	ASSERT(tbl);

	ASSERT((strptr == NULL) ^ (nstrs != 0));

	/*
	 * Must be a minimum of 4
	 */
	if (nstrs < 4) {
		return (B_FALSE);
	}

	ddi_err(DER_CONT, NULL, "?System DMAR ACPI table information:\n");
	ddi_err(DER_CONT, NULL, "?OEM-ID = <%s>\n", tbl->tbl_oem_id);
	ddi_err(DER_CONT, NULL, "?Table-ID = <%s>\n", tbl->tbl_oem_tblid);
	(void) snprintf(oem_rev, sizeof (oem_rev), "%d", tbl->tbl_oem_rev);
	ddi_err(DER_CONT, NULL, "?Revision = <%s>\n", oem_rev);

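	/*
	 * Blacklist entries are groups of four strings:
	 * "DMAR", OEM ID, OEM table ID and OEM revision. An empty
	 * table-ID or revision string matches any value.
	 */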
	for (i = 0; nstrs - i >= 4; i++) {
		if (strcmp(*strptr++, "DMAR") == 0) {
			if (strcmp(*strptr++, tbl->tbl_oem_id) == 0 &&
			    (*strptr[0] == '\0' ||
			    strcmp(*strptr++, tbl->tbl_oem_tblid) == 0) &&
			    (*strptr[0] == '\0' ||
			    strcmp(*strptr++, oem_rev) == 0)) {
				return (B_TRUE);
			}
			i += 3; /* the for loop adds 1 as well, so only 3 here */
		}
	}
	return (B_FALSE);
}

void
immu_dmar_rmrr_map(void)
{
	int seg;
	int count;
	dev_info_t *rdip;
	scope_t *scope;
	rmrr_t *rmrr;
	dmar_table_t *tbl;

	ASSERT(dmar_table);

	tbl = dmar_table;

	/* called during boot, when kernel is single threaded. No lock */

	/*
	 * for each segment, walk the rmrr list looking for an exact match
	 */
	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
		rmrr = list_head(&(tbl->tbl_rmrr_list)[seg]);
		for (; rmrr; rmrr = list_next(&(tbl->tbl_rmrr_list)[seg],
		    rmrr)) {

			/*
			 * try to match BDF *exactly* to a device scope.
			 */
			scope = list_head(&(rmrr->rm_scope_list));
			for (; scope;
			    scope = list_next(&(rmrr->rm_scope_list), scope)) {
				immu_arg_t imarg = {0};
				memrng_t mrng = {0};

				/* PCI endpoint devices only */
				if (scope->scp_type != DMAR_ENDPOINT)
					continue;

				imarg.ima_seg = seg;
				imarg.ima_bus = scope->scp_bus;
				imarg.ima_devfunc =
				    IMMU_PCI_DEVFUNC(scope->scp_dev,
				    scope->scp_func);
				imarg.ima_ddip = NULL;
				imarg.ima_rdip = NULL;

				ASSERT(root_devinfo);
				/* XXX should be optimized */
				ndi_devi_enter(root_devinfo, &count);
				ddi_walk_devs(ddi_get_child(root_devinfo),
				    match_bdf, &imarg);
				ndi_devi_exit(root_devinfo, count);

				if (imarg.ima_ddip == NULL) {
					ddi_err(DER_WARN, NULL,
					    "No dip found for "
					    "bus=0x%x, dev=0x%x, func=0x%x",
					    scope->scp_bus, scope->scp_dev,
					    scope->scp_func);
					continue;
				}

				rdip = imarg.ima_ddip;
				/*
				 * This address must be in the BIOS reserved
				 * map
				 */
				if (!address_in_memlist(bios_rsvd,
				    (uint64_t)rmrr->rm_base, rmrr->rm_limit -
				    rmrr->rm_base + 1)) {
					ddi_err(DER_WARN, rdip, "RMRR range "
					    "[0x%" PRIx64 " - 0x%" PRIx64 "]"
					    " not in BIOS reserved map",
					    rmrr->rm_base, rmrr->rm_limit);
				}

				/* XXX could be more efficient */
				memlist_read_lock();
				if (address_in_memlist(phys_install,
				    (uint64_t)rmrr->rm_base, rmrr->rm_limit -
				    rmrr->rm_base + 1)) {
					ddi_err(DER_WARN, rdip, "RMRR range "
					    "[0x%" PRIx64 " - 0x%" PRIx64 "]"
					    " is in physinstall map",
					    rmrr->rm_base, rmrr->rm_limit);
				}
				memlist_read_unlock();

				(void) immu_dvma_device_setup(rdip, 0);

				ddi_err(DER_LOG, rdip,
				    "IMMU: Mapping RMRR range "
				    "[0x%" PRIx64 " - 0x%" PRIx64 "]",
				    rmrr->rm_base, rmrr->rm_limit);

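				/* round the range out to IMMU page boundaries */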
				mrng.mrng_start =
				    IMMU_ROUNDOWN((uintptr_t)rmrr->rm_base);
				mrng.mrng_npages =
				    IMMU_ROUNDUP((uintptr_t)rmrr->rm_limit -
				    (uintptr_t)rmrr->rm_base + 1) /
				    IMMU_PAGESIZE;

				(void) immu_map_memrange(rdip, &mrng);
			}
		}
	}
}

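/*
 * Find the IOMMU (DRHD) unit that covers rdip. Preference order:
 * an exact BDF match in a unit's device scope, then the closest
 * enclosing sub-hierarchy (subtree) scope, then an INCLUDE_ALL unit.
 */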
immu_t *
immu_dmar_get_immu(dev_info_t *rdip)
{
	int seg;
	int tlevel;
	int level;
	drhd_t *drhd;
	drhd_t *tdrhd;
	scope_t *scope;
	dmar_table_t *tbl;

	ASSERT(dmar_table);

	tbl = dmar_table;

	mutex_enter(&(tbl->tbl_lock));

	/*
	 * for each segment, walk the drhd list looking for an exact match
	 */
	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
		drhd = list_head(&(tbl->tbl_drhd_list)[seg]);
		for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
		    drhd)) {

			/*
			 * we are currently searching for exact matches so
			 * skip "include all" (catchall) and subtree matches
			 */
			if (drhd->dr_include_all == B_TRUE)
				continue;

			/*
			 * try to match BDF *exactly* to a device scope.
			 */
			scope = list_head(&(drhd->dr_scope_list));
			for (; scope;
			    scope = list_next(&(drhd->dr_scope_list), scope)) {
				immu_arg_t imarg = {0};

				/* PCI endpoint devices only */
				if (scope->scp_type != DMAR_ENDPOINT)
					continue;

				imarg.ima_seg = seg;
				imarg.ima_bus = scope->scp_bus;
				imarg.ima_devfunc =
				    IMMU_PCI_DEVFUNC(scope->scp_dev,
				    scope->scp_func);
				imarg.ima_ddip = NULL;
				imarg.ima_rdip = rdip;
				level = 0;
				if (immu_walk_ancestor(rdip, NULL, match_bdf,
				    &imarg, &level, IMMU_FLAGS_DONTPASS)
				    != DDI_SUCCESS) {
					/* skip - nothing else we can do */
					continue;
				}

				/* Should have walked only 1 level i.e. rdip */
				ASSERT(level == 1);

				if (imarg.ima_ddip) {
					ASSERT(imarg.ima_ddip == rdip);
					goto found;
				}
			}
		}
	}

	/*
	 * walk the drhd list looking for subtree match
	 * i.e. is the device a descendant of a devscope BDF.
	 * We want the lowest subtree.
	 */
	tdrhd = NULL;
	tlevel = 0;
	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
		drhd = list_head(&(tbl->tbl_drhd_list)[seg]);
		for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
		    drhd)) {

			/* looking for subtree match */
			if (drhd->dr_include_all == B_TRUE)
				continue;

			/*
			 * try to match the device scope
			 */
			scope = list_head(&(drhd->dr_scope_list));
			for (; scope;
			    scope = list_next(&(drhd->dr_scope_list), scope)) {
				immu_arg_t imarg = {0};

				/* PCI subtree only */
				if (scope->scp_type != DMAR_SUBTREE)
					continue;

				imarg.ima_seg = seg;
				imarg.ima_bus = scope->scp_bus;
				imarg.ima_devfunc =
				    IMMU_PCI_DEVFUNC(scope->scp_dev,
				    scope->scp_func);

				imarg.ima_ddip = NULL;
				imarg.ima_rdip = rdip;
				level = 0;
				if (immu_walk_ancestor(rdip, NULL, match_bdf,
				    &imarg, &level, 0) != DDI_SUCCESS) {
					/* skip - nothing else we can do */
					continue;
				}

				/* should have walked at least 1 level */
				ASSERT(level > 0);

				/* look for lowest ancestor matching drhd */
				if (imarg.ima_ddip && (tdrhd == NULL ||
				    level < tlevel)) {
					tdrhd = drhd;
					tlevel = level;
				}
			}
		}
	}

	if ((drhd = tdrhd) != NULL) {
		goto found;
	}

	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
		drhd = list_head(&(tbl->tbl_drhd_list[seg]));
		for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
		    drhd)) {
			/* Look for include all */
			if (drhd->dr_include_all == B_TRUE) {
				break;
			}
		}
	}

	/*FALLTHRU*/

found:
	mutex_exit(&(tbl->tbl_lock));

	/*
	 * No drhd (dmar unit) found for this device in the ACPI DMAR tables.
	 * This may happen with buggy versions of BIOSes. Just warn instead
	 * of panicking as we don't want the whole system to go down because
	 * of one device.
	 */
	if (drhd == NULL) {
		ddi_err(DER_WARN, rdip, "can't find Intel IOMMU unit for "
		    "device in ACPI DMAR table.");
		return (NULL);
	}

	return (drhd->dr_immu);
}

dev_info_t *
immu_dmar_unit_dip(void *dmar_unit)
{
	drhd_t *drhd = (drhd_t *)dmar_unit;
	return (drhd->dr_dip);
}

void *
immu_dmar_walk_units(int seg, void *dmar_unit)
{
	list_t *drhd_list;
	drhd_t *drhd = (drhd_t *)dmar_unit;

	drhd_list = &(dmar_table->tbl_drhd_list[seg]);

	if (drhd == NULL) {
		return ((void *)list_head(drhd_list));
	} else {
		return ((void *)list_next(drhd_list, drhd));
	}
}

void
immu_dmar_set_immu(void *dmar_unit, immu_t *immu)
{
	drhd_t *drhd = (drhd_t *)dmar_unit;

	ASSERT(drhd);
	ASSERT(immu);

	drhd->dr_immu = immu;
}

boolean_t
immu_dmar_intrmap_supported(void)
{
	ASSERT(dmar_table);
	return (dmar_table->tbl_intrmap);
}

/* for a given ioapic index, find the source-id used for interrupt remapping */
uint16_t
immu_dmar_ioapic_sid(int ioapic_ix)
{
	ioapic_drhd_t *idt;

	idt = ioapic_drhd_lookup(psm_get_ioapicid(ioapic_ix));
	if (idt == NULL) {
		ddi_err(DER_PANIC, NULL, "cannot determine source-id for "
		    "IOAPIC (index = %d)", ioapic_ix);
		/*NOTREACHED*/
	}

	return (idt->ioapic_sid);
}

/* for a given ioapic index, find the immu unit that handles it */
immu_t *
immu_dmar_ioapic_immu(int ioapic_ix)
{
	ioapic_drhd_t *idt;

	idt = ioapic_drhd_lookup(psm_get_ioapicid(ioapic_ix));
	if (idt) {
		return (idt->ioapic_drhd ? idt->ioapic_drhd->dr_immu : NULL);
	}
	return (NULL);
}
