xref: /illumos-gate/usr/src/uts/intel/io/iommulib.c (revision 2d6eb4a5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
24  */
25 
26 #include <sys/sunddi.h>
27 #include <sys/sunndi.h>
28 #include <sys/errno.h>
29 #include <sys/modctl.h>
30 #include <sys/iommulib.h>
31 
32 /* ******** Type definitions private to this file  ********************** */
33 
34 /* 1 per IOMMU unit. There may be more than one per dip */
35 typedef struct iommulib_unit {
36 	kmutex_t ilu_lock;
37 	uint64_t ilu_ref;
38 	uint32_t ilu_unitid;
39 	dev_info_t *ilu_dip;
40 	iommulib_ops_t *ilu_ops;
41 	void *ilu_data;
42 	struct iommulib_unit *ilu_next;
43 	struct iommulib_unit *ilu_prev;
44 	iommulib_nexhandle_t ilu_nex;
45 } iommulib_unit_t;
46 
47 typedef struct iommulib_nex {
48 	dev_info_t *nex_dip;
49 	iommulib_nexops_t nex_ops;
50 	struct iommulib_nex *nex_next;
51 	struct iommulib_nex *nex_prev;
52 	uint_t nex_ref;
53 } iommulib_nex_t;
54 
55 /* *********  Globals ************************ */
56 
57 /* For IOMMU drivers */
58 smbios_hdl_t *iommulib_smbios;
59 
60 /* IOMMU side: Following data protected by lock */
61 static kmutex_t iommulib_lock;
62 static iommulib_unit_t   *iommulib_list;
63 static uint64_t iommulib_unit_ids = 0;
64 static uint64_t iommulib_num_units = 0;
65 
66 /* rootnex side data */
67 
68 static kmutex_t iommulib_nexus_lock;
69 static iommulib_nex_t *iommulib_nexus_list;
70 
71 /* can be set atomically without lock */
72 static volatile uint32_t iommulib_fini;
73 
74 /* debug flag */
75 static int iommulib_debug;
76 
77 /*
78  * Module linkage information for the kernel.
79  */
80 static struct modlmisc modlmisc = {
81 	&mod_miscops, "IOMMU library module"
82 };
83 
84 static struct modlinkage modlinkage = {
85 	MODREV_1, (void *)&modlmisc, NULL
86 };
87 
88 int
89 _init(void)
90 {
91 	return (mod_install(&modlinkage));
92 }
93 
94 int
95 _fini(void)
96 {
97 	mutex_enter(&iommulib_lock);
98 	if (iommulib_list != NULL || iommulib_nexus_list != NULL) {
99 		mutex_exit(&iommulib_lock);
100 		return (EBUSY);
101 	}
102 	iommulib_fini = 1;
103 
104 	mutex_exit(&iommulib_lock);
105 	return (mod_remove(&modlinkage));
106 }
107 
108 int
109 _info(struct modinfo *modinfop)
110 {
111 	return (mod_info(&modlinkage, modinfop));
112 }
113 
114 /*
115  * Routines with iommulib_iommu_* are invoked from the
116  * IOMMU driver.
117  * Routines with iommulib_nex* are invoked from the
118  * nexus driver (typically rootnex)
119  */
120 
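/*
 * Example (editorial sketch, not part of this module): a nexus driver would
 * typically register its nexops vector once at attach time and unregister it
 * at detach time.  The names my_nexops, my_nexus_attach and the
 * my_nexus_dma_* callbacks are hypothetical.
 *
 *	static iommulib_nexops_t my_nexops = {
 *		.nops_vers = IOMMU_NEXOPS_VERSION,
 *		.nops_id = "my_nexus",
 *		.nops_dma_allochdl = my_nexus_dma_allochdl,
 *		.nops_dma_freehdl = my_nexus_dma_freehdl,
 *		.nops_dma_bindhdl = my_nexus_dma_bindhdl,
 *		.nops_dma_unbindhdl = my_nexus_dma_unbindhdl,
 *		.nops_dma_sync = my_nexus_dma_sync,
 *		.nops_dma_win = my_nexus_dma_win,
 *		.nops_dmahdl_setprivate = my_nexus_dmahdl_setprivate,
 *		.nops_dmahdl_getprivate = my_nexus_dmahdl_getprivate,
 *	};
 *
 *	static int
 *	my_nexus_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 *	{
 *		iommulib_nexhandle_t handle;
 *
 *		if (iommulib_nexus_register(dip, &my_nexops,
 *		    &handle) != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		return (DDI_SUCCESS);
 *	}
 *
 * Every nexops entry checked by iommulib_nexus_register() below must be
 * non-NULL (the cookie, sleep-flag and reset ops are omitted above only for
 * brevity).  The returned handle is later passed to
 * iommulib_nexus_unregister() from detach.
 */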
121 int
122 iommulib_nexus_register(dev_info_t *dip, iommulib_nexops_t *nexops,
123     iommulib_nexhandle_t *handle)
124 {
125 	iommulib_nex_t *nexp;
126 	int instance = ddi_get_instance(dip);
127 	const char *driver = ddi_driver_name(dip);
128 	dev_info_t *pdip = ddi_get_parent(dip);
129 	const char *f = "iommulib_nexus_register";
130 
131 	ASSERT(nexops);
132 	ASSERT(handle);
133 
134 	*handle = NULL;
135 
136 	/*
137 	 * Root node is never busy held
138 	 */
139 	if (dip != ddi_root_node() && (i_ddi_node_state(dip) < DS_PROBED ||
140 	    !DEVI_BUSY_OWNED(pdip))) {
141 		cmn_err(CE_WARN, "%s: NEXUS devinfo node not in DS_PROBED "
142 		    "or busy held for nexops vector (%p). Failing registration",
143 		    f, (void *)nexops);
144 		return (DDI_FAILURE);
145 	}
146 
147 	if (nexops->nops_vers != IOMMU_NEXOPS_VERSION) {
148 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB nexops version "
149 		    "in nexops vector (%p). Failing NEXUS registration",
150 		    f, driver, instance, (void *)nexops);
151 		return (DDI_FAILURE);
152 	}
153 
154 	ASSERT(nexops->nops_data == NULL);
155 
156 	if (nexops->nops_id == NULL) {
157 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
158 		    "Failing registration for nexops vector: %p",
159 		    f, driver, instance, (void *)nexops);
160 		return (DDI_FAILURE);
161 	}
162 
163 	if (nexops->nops_dma_allochdl == NULL) {
164 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_allochdl op. "
165 		    "Failing registration for ops vector: %p", f,
166 		    driver, instance, (void *)nexops);
167 		return (DDI_FAILURE);
168 	}
169 
170 	if (nexops->nops_dma_freehdl == NULL) {
171 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_freehdl op. "
172 		    "Failing registration for ops vector: %p", f,
173 		    driver, instance, (void *)nexops);
174 		return (DDI_FAILURE);
175 	}
176 
177 	if (nexops->nops_dma_bindhdl == NULL) {
178 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_bindhdl op. "
179 		    "Failing registration for ops vector: %p", f,
180 		    driver, instance, (void *)nexops);
181 		return (DDI_FAILURE);
182 	}
183 
184 	if (nexops->nops_dma_sync == NULL) {
185 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_sync op. "
186 		    "Failing registration for ops vector: %p", f,
187 		    driver, instance, (void *)nexops);
188 		return (DDI_FAILURE);
189 	}
190 
191 	if (nexops->nops_dma_reset_cookies == NULL) {
192 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_reset_cookies op. "
193 		    "Failing registration for ops vector: %p", f,
194 		    driver, instance, (void *)nexops);
195 		return (DDI_FAILURE);
196 	}
197 
198 	if (nexops->nops_dma_get_cookies == NULL) {
199 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_cookies op. "
200 		    "Failing registration for ops vector: %p", f,
201 		    driver, instance, (void *)nexops);
202 		return (DDI_FAILURE);
203 	}
204 
205 	if (nexops->nops_dma_set_cookies == NULL) {
206 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_set_cookies op. "
207 		    "Failing registration for ops vector: %p", f,
208 		    driver, instance, (void *)nexops);
209 		return (DDI_FAILURE);
210 	}
211 
212 	if (nexops->nops_dma_clear_cookies == NULL) {
213 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_clear_cookies op. "
214 		    "Failing registration for ops vector: %p", f,
215 		    driver, instance, (void *)nexops);
216 		return (DDI_FAILURE);
217 	}
218 
219 	if (nexops->nops_dma_get_sleep_flags == NULL) {
220 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_get_sleep_flags op. "
221 		    "Failing registration for ops vector: %p", f,
222 		    driver, instance, (void *)nexops);
223 		return (DDI_FAILURE);
224 	}
225 
226 	if (nexops->nops_dma_win == NULL) {
227 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dma_win op. "
228 		    "Failing registration for ops vector: %p", f,
229 		    driver, instance, (void *)nexops);
230 		return (DDI_FAILURE);
231 	}
232 
233 	if (nexops->nops_dmahdl_setprivate == NULL) {
234 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_setprivate op. "
235 		    "Failing registration for ops vector: %p", f,
236 		    driver, instance, (void *)nexops);
237 		return (DDI_FAILURE);
238 	}
239 
240 	if (nexops->nops_dmahdl_getprivate == NULL) {
241 		cmn_err(CE_WARN, "%s: %s%d: NULL nops_dmahdl_getprivate op. "
242 		    "Failing registration for ops vector: %p", f,
243 		    driver, instance, (void *)nexops);
244 		return (DDI_FAILURE);
245 	}
246 
247 	nexp = kmem_zalloc(sizeof (iommulib_nex_t), KM_SLEEP);
248 
249 	mutex_enter(&iommulib_lock);
250 	if (iommulib_fini == 1) {
251 		mutex_exit(&iommulib_lock);
252 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. "
253 		    "Failing NEXUS register.", f);
254 		kmem_free(nexp, sizeof (iommulib_nex_t));
255 		return (DDI_FAILURE);
256 	}
257 
258 	/*
259 	 * fini/register race conditions have been handled. Now create the
260 	 * nexus struct
261 	 */
262 	ndi_hold_devi(dip);
263 	nexp->nex_dip = dip;
264 	nexp->nex_ops = *nexops;
265 
266 	mutex_enter(&iommulib_nexus_lock);
267 	nexp->nex_next = iommulib_nexus_list;
268 	iommulib_nexus_list = nexp;
269 	nexp->nex_prev = NULL;
270 
271 	if (nexp->nex_next != NULL)
272 		nexp->nex_next->nex_prev = nexp;
273 
274 	nexp->nex_ref = 0;
275 
276 	/*
277 	 * The nexus device won't be controlled by an IOMMU.
278 	 */
279 	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
280 
281 	DEVI(dip)->devi_iommulib_nex_handle = nexp;
282 
283 	mutex_exit(&iommulib_nexus_lock);
284 	mutex_exit(&iommulib_lock);
285 
286 	cmn_err(CE_NOTE, "!%s: %s%d: Successfully registered NEXUS %s "
287 	    "nexops=%p", f, driver, instance, ddi_node_name(dip),
288 	    (void *)nexops);
289 
290 	*handle = nexp;
291 
292 	return (DDI_SUCCESS);
293 }
294 
295 int
296 iommulib_nexus_unregister(iommulib_nexhandle_t handle)
297 {
298 	dev_info_t *dip;
299 	int instance;
300 	const char *driver;
301 	iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
302 	const char *f = "iommulib_nexus_unregister";
303 
304 	ASSERT(nexp);
305 
306 	if (nexp->nex_ref != 0)
307 		return (DDI_FAILURE);
308 
309 	mutex_enter(&iommulib_nexus_lock);
310 
311 	dip = nexp->nex_dip;
312 	driver = ddi_driver_name(dip);
313 	instance = ddi_get_instance(dip);
314 
315 	/* A future enhancement would be to add ref-counts */
316 
317 	if (nexp->nex_prev == NULL) {
318 		iommulib_nexus_list = nexp->nex_next;
319 	} else {
320 		nexp->nex_prev->nex_next = nexp->nex_next;
321 	}
322 
323 	if (nexp->nex_next != NULL)
324 		nexp->nex_next->nex_prev = nexp->nex_prev;
325 
326 	mutex_exit(&iommulib_nexus_lock);
327 
328 	kmem_free(nexp, sizeof (iommulib_nex_t));
329 
330 	cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
331 	    "unregistered from IOMMULIB", f, driver, instance,
332 	    ddi_node_name(dip));
333 
334 	ndi_rele_devi(dip);
335 
336 	return (DDI_SUCCESS);
337 }
338 
339 int
340 iommulib_iommu_register(dev_info_t *dip, iommulib_ops_t *ops,
341     iommulib_handle_t *handle)
342 {
343 	const char *vendor;
344 	iommulib_unit_t *unitp;
345 	int instance = ddi_get_instance(dip);
346 	const char *driver = ddi_driver_name(dip);
347 	const char *f = "iommulib_register";
348 
349 	ASSERT(ops);
350 	ASSERT(handle);
351 
352 	if (ops->ilops_vers != IOMMU_OPS_VERSION) {
353 		cmn_err(CE_WARN, "%s: %s%d: Invalid IOMMULIB ops version "
354 		    "in ops vector (%p). Failing registration", f, driver,
355 		    instance, (void *)ops);
356 		return (DDI_FAILURE);
357 	}
358 
359 	switch (ops->ilops_vendor) {
360 	case AMD_IOMMU:
361 		vendor = "AMD";
362 		break;
363 	case INTEL_IOMMU:
364 		vendor = "Intel";
365 		break;
366 	case INVALID_VENDOR:
367 		cmn_err(CE_WARN, "%s: %s%d: vendor field (%x) not initialized. "
368 		    "Failing registration for ops vector: %p", f,
369 		    driver, instance, ops->ilops_vendor, (void *)ops);
370 		return (DDI_FAILURE);
371 	default:
372 		cmn_err(CE_WARN, "%s: %s%d: Invalid vendor field (%x). "
373 		    "Failing registration for ops vector: %p", f,
374 		    driver, instance, ops->ilops_vendor, (void *)ops);
375 		return (DDI_FAILURE);
376 	}
377 
378 	cmn_err(CE_NOTE, "!%s: %s%d: Detected IOMMU registration from vendor"
379 	    " %s", f, driver, instance, vendor);
380 
381 	if (ops->ilops_data == NULL) {
382 		cmn_err(CE_WARN, "%s: %s%d: NULL IOMMU data field. "
383 		    "Failing registration for ops vector: %p", f,
384 		    driver, instance, (void *)ops);
385 		return (DDI_FAILURE);
386 	}
387 
388 	if (ops->ilops_id == NULL) {
389 		cmn_err(CE_WARN, "%s: %s%d: NULL ID field. "
390 		    "Failing registration for ops vector: %p", f,
391 		    driver, instance, (void *)ops);
392 		return (DDI_FAILURE);
393 	}
394 
395 	if (ops->ilops_probe == NULL) {
396 		cmn_err(CE_WARN, "%s: %s%d: NULL probe op. "
397 		    "Failing registration for ops vector: %p", f,
398 		    driver, instance, (void *)ops);
399 		return (DDI_FAILURE);
400 	}
401 
402 	if (ops->ilops_dma_allochdl == NULL) {
403 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_allochdl op. "
404 		    "Failing registration for ops vector: %p", f,
405 		    driver, instance, (void *)ops);
406 		return (DDI_FAILURE);
407 	}
408 
409 	if (ops->ilops_dma_freehdl == NULL) {
410 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_freehdl op. "
411 		    "Failing registration for ops vector: %p", f,
412 		    driver, instance, (void *)ops);
413 		return (DDI_FAILURE);
414 	}
415 
416 	if (ops->ilops_dma_bindhdl == NULL) {
417 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_bindhdl op. "
418 		    "Failing registration for ops vector: %p", f,
419 		    driver, instance, (void *)ops);
420 		return (DDI_FAILURE);
421 	}
422 
423 	if (ops->ilops_dma_sync == NULL) {
424 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_sync op. "
425 		    "Failing registration for ops vector: %p", f,
426 		    driver, instance, (void *)ops);
427 		return (DDI_FAILURE);
428 	}
429 
430 	if (ops->ilops_dma_win == NULL) {
431 		cmn_err(CE_WARN, "%s: %s%d: NULL dma_win op. "
432 		    "Failing registration for ops vector: %p", f,
433 		    driver, instance, (void *)ops);
434 		return (DDI_FAILURE);
435 	}
436 
437 	unitp = kmem_zalloc(sizeof (iommulib_unit_t), KM_SLEEP);
438 	mutex_enter(&iommulib_lock);
439 	if (iommulib_fini == 1) {
440 		mutex_exit(&iommulib_lock);
441 		cmn_err(CE_WARN, "%s: IOMMULIB unloading. Failing register.",
442 		    f);
443 		kmem_free(unitp, sizeof (iommulib_unit_t));
444 		return (DDI_FAILURE);
445 	}
446 
447 	/*
448 	 * fini/register race conditions have been handled. Now create the
449 	 * IOMMU unit
450 	 */
451 	mutex_init(&unitp->ilu_lock, NULL, MUTEX_DEFAULT, NULL);
452 
453 	mutex_enter(&unitp->ilu_lock);
454 	unitp->ilu_unitid = ++iommulib_unit_ids;
455 	unitp->ilu_ref = 0;
456 	ndi_hold_devi(dip);
457 	unitp->ilu_dip = dip;
458 	unitp->ilu_ops = ops;
459 	unitp->ilu_data = ops->ilops_data;
460 
461 	unitp->ilu_next = iommulib_list;
462 	iommulib_list = unitp;
463 	unitp->ilu_prev = NULL;
464 	if (unitp->ilu_next)
465 		unitp->ilu_next->ilu_prev = unitp;
466 
467 	/*
468 	 * The IOMMU device itself is not controlled by an IOMMU.
469 	 */
470 	DEVI(dip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
471 
472 	mutex_exit(&unitp->ilu_lock);
473 
474 	iommulib_num_units++;
475 
476 	*handle = unitp;
477 
478 	mutex_exit(&iommulib_lock);
479 
480 	cmn_err(CE_NOTE, "!%s: %s%d: Successfully registered IOMMU unit "
481 	    "from vendor=%s, ops=%p, data=%p, IOMMULIB unitid=%u",
482 	    f, driver, instance, vendor, (void *)ops, (void *)unitp->ilu_data,
483 	    unitp->ilu_unitid);
484 
485 	return (DDI_SUCCESS);
486 }
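/*
 * Example (editorial sketch): an IOMMU driver registers one ops vector per
 * IOMMU unit it discovers.  The my_iommu_* names and struct my_iommu_state
 * are hypothetical; the ilops_* fields, vendor constants and
 * IOMMU_OPS_VERSION come from <sys/iommulib.h>.
 *
 *	static int
 *	my_iommu_register_unit(dev_info_t *dip, struct my_iommu_state *statep)
 *	{
 *		iommulib_ops_t *ops;
 *		iommulib_handle_t handle;
 *
 *		ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
 *		ops->ilops_vers = IOMMU_OPS_VERSION;
 *		ops->ilops_vendor = AMD_IOMMU;
 *		ops->ilops_id = "my_iommu";
 *		ops->ilops_data = statep;
 *		ops->ilops_probe = my_iommu_probe;
 *		ops->ilops_dma_allochdl = my_iommu_dma_allochdl;
 *		ops->ilops_dma_freehdl = my_iommu_dma_freehdl;
 *		ops->ilops_dma_bindhdl = my_iommu_dma_bindhdl;
 *		ops->ilops_dma_unbindhdl = my_iommu_dma_unbindhdl;
 *		ops->ilops_dma_sync = my_iommu_dma_sync;
 *		ops->ilops_dma_win = my_iommu_dma_win;
 *
 *		return (iommulib_iommu_register(dip, ops, &handle));
 *	}
 *
 * ilops_vendor may also be INTEL_IOMMU, and ilops_data must not be NULL.
 * The handle would normally be stashed in the driver's soft state so that
 * iommulib_iommu_unregister() can be called at detach; unregistration fails
 * while any device is still open against the unit (ilu_ref != 0).
 */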
487 
488 int
489 iommulib_iommu_unregister(iommulib_handle_t handle)
490 {
491 	uint32_t unitid;
492 	dev_info_t *dip;
493 	int instance;
494 	const char *driver;
495 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
496 	const char *f = "iommulib_unregister";
497 
498 	ASSERT(unitp);
499 
500 	mutex_enter(&iommulib_lock);
501 	mutex_enter(&unitp->ilu_lock);
502 
503 	unitid = unitp->ilu_unitid;
504 	dip = unitp->ilu_dip;
505 	driver = ddi_driver_name(dip);
506 	instance = ddi_get_instance(dip);
507 
508 	if (unitp->ilu_ref != 0) {
509 		mutex_exit(&unitp->ilu_lock);
510 		mutex_exit(&iommulib_lock);
511 		cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
512 		    "unregister IOMMULIB unitid %u",
513 		    f, driver, instance, unitid);
514 		return (DDI_FAILURE);
515 	}
516 	unitp->ilu_unitid = 0;
517 	ASSERT(unitp->ilu_ref == 0);
518 
519 	if (unitp->ilu_prev == NULL)
520 		iommulib_list = unitp->ilu_next;
521 	else
522 		unitp->ilu_prev->ilu_next = unitp->ilu_next;
523 	/* the unlinked unit may be the last one on the list */
524 	if (unitp->ilu_next != NULL)
525 		unitp->ilu_next->ilu_prev = unitp->ilu_prev;
526 
527 	iommulib_num_units--;
528 
529 	mutex_exit(&unitp->ilu_lock);
530 
531 	mutex_destroy(&unitp->ilu_lock);
532 	kmem_free(unitp, sizeof (iommulib_unit_t));
533 
534 	mutex_exit(&iommulib_lock);
535 
536 	cmn_err(CE_NOTE, "!%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
537 	    "unregistered", f, driver, instance, unitid);
538 
539 	ndi_rele_devi(dip);
540 
541 	return (DDI_SUCCESS);
542 }
543 
544 int
545 iommulib_nex_open(dev_info_t *dip, dev_info_t *rdip)
546 {
547 	iommulib_unit_t *unitp;
548 	int instance = ddi_get_instance(rdip);
549 	const char *driver = ddi_driver_name(rdip);
550 	const char *f = "iommulib_nex_open";
551 
552 	ASSERT(DEVI(dip)->devi_iommulib_nex_handle != NULL);
553 	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);
554 
555 	/* prevent use of IOMMU for AMD IOMMU's DMA */
556 	if (strcmp(driver, "amd_iommu") == 0) {
557 		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
558 		return (DDI_ENOTSUP);
559 	}
560 
561 	/*
562 	 * Use the probe entry point to determine in a hardware specific
563 	 * manner whether this dip is controlled by an IOMMU. If yes,
564 	 * return the handle corresponding to the IOMMU unit.
565 	 */
566 
567 	mutex_enter(&iommulib_lock);
568 	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
569 		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
570 			break;
571 	}
572 
573 	if (unitp == NULL) {
574 		mutex_exit(&iommulib_lock);
575 		if (iommulib_debug) {
576 			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
577 			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p) is not "
578 			    "controlled by an IOMMU: path=%s", f, driver,
579 			    instance, (void *)rdip, ddi_pathname(rdip, buf));
580 			kmem_free(buf, MAXPATHLEN);
581 		}
582 		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
583 		return (DDI_ENOTSUP);
584 	}
585 
586 	mutex_enter(&unitp->ilu_lock);
587 	unitp->ilu_nex = DEVI(dip)->devi_iommulib_nex_handle;
588 	unitp->ilu_ref++;
589 	DEVI(rdip)->devi_iommulib_handle = unitp;
590 	mutex_exit(&unitp->ilu_lock);
591 	mutex_exit(&iommulib_lock);
592 
593 	atomic_inc_uint(&DEVI(dip)->devi_iommulib_nex_handle->nex_ref);
594 
595 	return (DDI_SUCCESS);
596 }
597 
598 void
599 iommulib_nex_close(dev_info_t *rdip)
600 {
601 	iommulib_unit_t *unitp;
602 	const char *driver;
603 	int instance;
604 	uint32_t unitid;
605 	iommulib_nex_t *nexp;
606 	const char *f = "iommulib_nex_close";
607 
608 	ASSERT(IOMMU_USED(rdip));
609 
610 	unitp = DEVI(rdip)->devi_iommulib_handle;
611 
612 	mutex_enter(&iommulib_lock);
613 	mutex_enter(&unitp->ilu_lock);
614 
615 	nexp = (iommulib_nex_t *)unitp->ilu_nex;
616 	DEVI(rdip)->devi_iommulib_handle = NULL;
617 
618 	unitid = unitp->ilu_unitid;
619 	driver = ddi_driver_name(unitp->ilu_dip);
620 	instance = ddi_get_instance(unitp->ilu_dip);
621 
622 	unitp->ilu_ref--;
623 	mutex_exit(&unitp->ilu_lock);
624 	mutex_exit(&iommulib_lock);
625 
626 	atomic_dec_uint(&nexp->nex_ref);
627 
628 	if (iommulib_debug) {
629 		char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
630 		(void) ddi_pathname(rdip, buf);
631 		cmn_err(CE_NOTE, "%s: %s%d: closing IOMMU for dip (%p), "
632 		    "unitid=%u rdip path = %s", f, driver, instance,
633 		    (void *)rdip, unitid, buf);
634 		kmem_free(buf, MAXPATHLEN);
635 	}
636 }
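/*
 * Example (editorial sketch): the nexus driver typically calls
 * iommulib_nex_open() the first time a child device sets up DMA and then
 * routes the DDI DMA operations through the iommulib_nexdma_* wrappers
 * whenever the device turned out to be behind an IOMMU.  The function below
 * is hypothetical, not actual rootnex code.
 *
 *	static int
 *	my_nexus_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
 *	    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
 *	    ddi_dma_handle_t *handlep)
 *	{
 *		if (DEVI(rdip)->devi_iommulib_handle == NULL)
 *			(void) iommulib_nex_open(dip, rdip);
 *
 *		if (IOMMU_USED(rdip))
 *			return (iommulib_nexdma_allochdl(dip, rdip, attr,
 *			    waitfp, arg, handlep));
 *
 *		return (my_nexus_plain_allochdl(dip, rdip, attr, waitfp,
 *		    arg, handlep));
 *	}
 *
 * iommulib_nex_open() returns DDI_ENOTSUP and marks the handle as
 * IOMMU_HANDLE_UNUSED when no registered unit claims the device, so the
 * non-IOMMU path is taken on later calls without probing again.
 */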
637 
638 int
639 iommulib_nexdma_allochdl(dev_info_t *dip, dev_info_t *rdip,
640     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
641     caddr_t arg, ddi_dma_handle_t *dma_handlep)
642 {
643 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
644 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
645 
646 	ASSERT(unitp);
647 
648 	/* No need to grab lock - the handle is reference counted */
649 	return (unitp->ilu_ops->ilops_dma_allochdl(handle, dip, rdip,
650 	    attr, waitfp, arg, dma_handlep));
651 }
652 
653 int
654 iommulib_nexdma_freehdl(dev_info_t *dip, dev_info_t *rdip,
655     ddi_dma_handle_t dma_handle)
656 {
657 	int error;
658 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
659 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
660 
661 	ASSERT(unitp);
662 
663 	/* No need to grab lock - the handle is reference counted */
664 	error = unitp->ilu_ops->ilops_dma_freehdl(handle, dip,
665 	    rdip, dma_handle);
666 
667 	return (error);
668 }
669 
670 int
671 iommulib_nexdma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
672     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
673     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
674 {
675 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
676 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
677 
678 	ASSERT(unitp);
679 
680 	/* No need to grab lock - the handle is reference counted */
681 	return (unitp->ilu_ops->ilops_dma_bindhdl(handle, dip, rdip, dma_handle,
682 	    dmareq, cookiep, ccountp));
683 }
684 
685 int
686 iommulib_nexdma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
687     ddi_dma_handle_t dma_handle)
688 {
689 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
690 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
691 
692 	ASSERT(unitp);
693 
694 	/* No need to grab lock - the handle is reference counted */
695 	return (unitp->ilu_ops->ilops_dma_unbindhdl(handle, dip, rdip,
696 	    dma_handle));
697 }
698 
699 int
700 iommulib_nexdma_sync(dev_info_t *dip, dev_info_t *rdip,
701     ddi_dma_handle_t dma_handle, off_t off, size_t len,
702     uint_t cache_flags)
703 {
704 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
705 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
706 
707 	ASSERT(unitp);
708 
709 	/* No need to grab lock - the handle is reference counted */
710 	return (unitp->ilu_ops->ilops_dma_sync(handle, dip, rdip, dma_handle,
711 	    off, len, cache_flags));
712 }
713 
714 int
715 iommulib_nexdma_win(dev_info_t *dip, dev_info_t *rdip,
716     ddi_dma_handle_t dma_handle, uint_t win, off_t *offp, size_t *lenp,
717     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
718 {
719 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
720 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
721 
722 	ASSERT(unitp);
723 
724 	/* No need to grab lock - the handle is reference counted */
725 	return (unitp->ilu_ops->ilops_dma_win(handle, dip, rdip, dma_handle,
726 	    win, offp, lenp, cookiep, ccountp));
727 }
728 
729 int
730 iommulib_nexdma_mapobject(dev_info_t *dip, dev_info_t *rdip,
731     ddi_dma_handle_t dma_handle, struct ddi_dma_req *dmareq,
732     ddi_dma_obj_t *dmao)
733 {
734 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
735 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
736 
737 	return (unitp->ilu_ops->ilops_dma_mapobject(handle, dip, rdip,
738 	    dma_handle, dmareq, dmao));
739 }
740 
741 int
742 iommulib_nexdma_unmapobject(dev_info_t *dip, dev_info_t *rdip,
743     ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
744 {
745 	iommulib_handle_t handle = DEVI(rdip)->devi_iommulib_handle;
746 	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
747 
748 	return (unitp->ilu_ops->ilops_dma_unmapobject(handle, dip, rdip,
749 	    dma_handle, dmao));
750 }
751 
752 /* Utility routines invoked by IOMMU drivers */
753 int
754 iommulib_iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
755     ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
756     ddi_dma_handle_t *handlep)
757 {
758 	iommulib_nexops_t *nexops;
759 
760 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
761 	return (nexops->nops_dma_allochdl(dip, rdip, attr, waitfp, arg,
762 	    handlep));
763 }
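/*
 * Example (editorial sketch): an IOMMU driver's ilops_dma_allochdl callback
 * commonly adjusts the DMA attributes for the unit and then defers to the
 * nexus through the pass-through routine above.  my_iommu_fixup_attr is a
 * hypothetical helper.
 *
 *	static int
 *	my_iommu_dma_allochdl(iommulib_handle_t handle, dev_info_t *dip,
 *	    dev_info_t *rdip, ddi_dma_attr_t *attr, int (*waitfp)(caddr_t),
 *	    caddr_t arg, ddi_dma_handle_t *handlep)
 *	{
 *		ddi_dma_attr_t unit_attr = *attr;
 *
 *		my_iommu_fixup_attr(handle, &unit_attr);
 *		return (iommulib_iommu_dma_allochdl(dip, rdip, &unit_attr,
 *		    waitfp, arg, handlep));
 *	}
 */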
764 
765 int
766 iommulib_iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
767     ddi_dma_handle_t handle)
768 {
769 	iommulib_nexops_t *nexops;
770 
771 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
772 	ASSERT(nexops);
773 	return (nexops->nops_dma_freehdl(dip, rdip, handle));
774 }
775 
776 int
777 iommulib_iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
778     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
779     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
780 {
781 	iommulib_nexops_t *nexops;
782 
783 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
784 	return (nexops->nops_dma_bindhdl(dip, rdip, handle, dmareq,
785 	    cookiep, ccountp));
786 }
787 
788 int
789 iommulib_iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
790     ddi_dma_handle_t handle)
791 {
792 	iommulib_nexops_t *nexops;
793 
794 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
795 	return (nexops->nops_dma_unbindhdl(dip, rdip, handle));
796 }
797 
798 void
799 iommulib_iommu_dma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
800 {
801 	iommulib_nexops_t *nexops;
802 
803 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
804 	nexops->nops_dma_reset_cookies(dip, handle);
805 }
806 
807 int
808 iommulib_iommu_dma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
809     ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
810 {
811 	iommulib_nexops_t *nexops;
812 
813 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
814 	return (nexops->nops_dma_get_cookies(dip, handle, cookiepp, ccountp));
815 }
816 
817 int
818 iommulib_iommu_dma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
819     ddi_dma_cookie_t *cookiep, uint_t ccount)
820 {
821 	iommulib_nexops_t *nexops;
822 
823 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
824 	return (nexops->nops_dma_set_cookies(dip, handle, cookiep, ccount));
825 }
826 
827 int
828 iommulib_iommu_dma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
829 {
830 	iommulib_nexops_t *nexops;
831 
832 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
833 	return (nexops->nops_dma_clear_cookies(dip, handle));
834 }
835 
836 int
837 iommulib_iommu_dma_get_sleep_flags(dev_info_t *dip, ddi_dma_handle_t handle)
838 {
839 	iommulib_nexops_t *nexops;
840 
841 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
842 	return (nexops->nops_dma_get_sleep_flags(handle));
843 }
844 
845 int
846 iommulib_iommu_dma_sync(dev_info_t *dip, dev_info_t *rdip,
847     ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
848 {
849 	iommulib_nexops_t *nexops;
850 
851 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
852 	return (nexops->nops_dma_sync(dip, rdip, handle, off, len,
853 	    cache_flags));
854 }
855 
856 int
857 iommulib_iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
858     ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
859     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
860 {
861 	iommulib_nexops_t *nexops;
862 
863 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
864 	return (nexops->nops_dma_win(dip, rdip, handle, win, offp, lenp,
865 	    cookiep, ccountp));
866 }
867 
868 int
869 iommulib_iommu_dmahdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
870     ddi_dma_handle_t handle, void *priv)
871 {
872 	iommulib_nexops_t *nexops;
873 
874 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
875 	return (nexops->nops_dmahdl_setprivate(dip, rdip, handle, priv));
876 }
877 
878 void *
879 iommulib_iommu_dmahdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
880     ddi_dma_handle_t handle)
881 {
882 	iommulib_nexops_t *nexops;
883 
884 	nexops = &DEVI(dip)->devi_iommulib_nex_handle->nex_ops;
885 	return (nexops->nops_dmahdl_getprivate(dip, rdip, handle));
886 }
887 
888 int
889 iommulib_iommu_getunitid(iommulib_handle_t handle, uint64_t *unitidp)
890 {
891 	iommulib_unit_t *unitp;
892 	uint64_t unitid;
893 
894 	unitp = (iommulib_unit_t *)handle;
895 
896 	ASSERT(unitp);
897 	ASSERT(unitidp);
898 
899 	mutex_enter(&unitp->ilu_lock);
900 	unitid = unitp->ilu_unitid;
901 	mutex_exit(&unitp->ilu_lock);
902 
903 	ASSERT(unitid > 0);
904 	*unitidp = (uint64_t)unitid;
905 
906 	return (DDI_SUCCESS);
907 }
908 
909 dev_info_t *
910 iommulib_iommu_getdip(iommulib_handle_t handle)
911 {
912 	iommulib_unit_t *unitp;
913 	dev_info_t *dip;
914 
915 	unitp = (iommulib_unit_t *)handle;
916 
917 	ASSERT(unitp);
918 
919 	mutex_enter(&unitp->ilu_lock);
920 	dip = unitp->ilu_dip;
921 	ASSERT(dip);
922 	ndi_hold_devi(dip);
923 	mutex_exit(&unitp->ilu_lock);
924 
925 	return (dip);
926 }
927 
928 iommulib_ops_t *
929 iommulib_iommu_getops(iommulib_handle_t handle)
930 {
931 	iommulib_unit_t *unitp;
932 	iommulib_ops_t *ops;
933 
934 	unitp = (iommulib_unit_t *)handle;
935 
936 	ASSERT(unitp);
937 
938 	mutex_enter(&unitp->ilu_lock);
939 	ops = unitp->ilu_ops;
940 	mutex_exit(&unitp->ilu_lock);
941 
942 	ASSERT(ops);
943 
944 	return (ops);
945 }
946 
947 void *
948 iommulib_iommu_getdata(iommulib_handle_t handle)
949 {
950 	iommulib_unit_t *unitp;
951 	void *data;
952 
953 	unitp = (iommulib_unit_t *)handle;
954 
955 	ASSERT(unitp);
956 
957 	mutex_enter(&unitp->ilu_lock);
958 	data = unitp->ilu_data;
959 	mutex_exit(&unitp->ilu_lock);
960 
961 	ASSERT(data);
962 
963 	return (data);
964 }
965
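/*
 * Example (editorial sketch): inside its callbacks an IOMMU driver can
 * recover the per-unit soft state it supplied as ilops_data at registration
 * time.  struct my_iommu_state is hypothetical.
 *
 *	static struct my_iommu_state *
 *	my_iommu_statep(iommulib_handle_t handle)
 *	{
 *		return ((struct my_iommu_state *)
 *		    iommulib_iommu_getdata(handle));
 *	}
 */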