xref: /illumos-gate/usr/src/uts/common/os/driver_lyr.c (revision 987b2a77)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Layered driver support.
27  */
28 
29 #include <sys/atomic.h>
30 #include <sys/types.h>
31 #include <sys/t_lock.h>
32 #include <sys/param.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/buf.h>
37 #include <sys/cred.h>
38 #include <sys/uio.h>
39 #include <sys/vnode.h>
40 #include <sys/fs/snode.h>
41 #include <sys/open.h>
42 #include <sys/kmem.h>
43 #include <sys/file.h>
44 #include <sys/bootconf.h>
45 #include <sys/pathname.h>
46 #include <sys/bitmap.h>
47 #include <sys/stat.h>
48 #include <sys/dditypes.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/sunndi.h>
53 #include <sys/esunddi.h>
54 #include <sys/autoconf.h>
55 #include <sys/sunldi.h>
56 #include <sys/sunldi_impl.h>
57 #include <sys/errno.h>
58 #include <sys/debug.h>
59 #include <sys/modctl.h>
60 #include <sys/var.h>
61 #include <vm/seg_vn.h>
62 
63 #include <sys/stropts.h>
64 #include <sys/strsubr.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/kstr.h>
68 
69 /*
70  * Device contract related
71  */
72 #include <sys/contract_impl.h>
73 #include <sys/contract/device_impl.h>
74 
75 /*
76  * Define macros to manipulate snode, vnode, and open device flags
77  */
78 #define	VTYP_VALID(i)	(((i) == VCHR) || ((i) == VBLK))
79 #define	VTYP_TO_OTYP(i)	(((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
80 #define	VTYP_TO_STYP(i)	(((i) == VCHR) ? S_IFCHR : S_IFBLK)
81 
82 #define	OTYP_VALID(i)	(((i) == OTYP_CHR) || ((i) == OTYP_BLK))
83 #define	OTYP_TO_VTYP(i)	(((i) == OTYP_CHR) ? VCHR : VBLK)
84 #define	OTYP_TO_STYP(i)	(((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
85 
86 #define	STYP_VALID(i)	(((i) == S_IFCHR) || ((i) == S_IFBLK))
87 #define	STYP_TO_VTYP(i)	(((i) == S_IFCHR) ? VCHR : VBLK)
88 
89 /*
90  * Define macros for accessing layered driver hash structures
91  */
92 #define	LH_HASH(vp)		(handle_hash_func(vp) % LH_HASH_SZ)
93 #define	LI_HASH(mid, dip, dev)	(ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
94 
95 /*
96  * Define layered handle flags used in the lh_type field
97  */
98 #define	LH_STREAM	(0x1)	/* handle to a streams device */
99 #define	LH_CBDEV	(0x2)	/* handle to a char/block device */
100 
101 /*
102  * Define macro for devid property lookups
103  */
104 #define	DEVID_PROP_FLAGS	(DDI_PROP_DONTPASS | \
105 				DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
106 
107 /*
108  * Dummy string for NDI events
109  */
110 #define	NDI_EVENT_SERVICE	"NDI_EVENT_SERVICE"
111 
112 static void ldi_ev_lock(void);
113 static void ldi_ev_unlock(void);
114 
115 #ifdef	LDI_OBSOLETE_EVENT
116 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
117 #endif
118 
119 
120 /*
121  * globals
122  */
123 static kmutex_t			ldi_ident_hash_lock[LI_HASH_SZ];
124 static struct ldi_ident		*ldi_ident_hash[LI_HASH_SZ];
125 
126 static kmutex_t			ldi_handle_hash_lock[LH_HASH_SZ];
127 static struct ldi_handle	*ldi_handle_hash[LH_HASH_SZ];
128 static size_t			ldi_handle_hash_count;
129 
130 static struct ldi_ev_callback_list ldi_ev_callback_list;
131 
132 static uint32_t ldi_ev_id_pool = 0;
133 
134 struct ldi_ev_cookie {
135 	char *ck_evname;
136 	uint_t ck_sync;
137 	uint_t ck_ctype;
138 };
139 
140 static struct ldi_ev_cookie ldi_ev_cookies[] = {
141 	{ LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
142 	{ LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
143 	{ NULL}			/* must terminate list */
144 };
145 
146 void
147 ldi_init(void)
148 {
149 	int i;
150 
151 	ldi_handle_hash_count = 0;
152 	for (i = 0; i < LH_HASH_SZ; i++) {
153 		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
154 		ldi_handle_hash[i] = NULL;
155 	}
156 	for (i = 0; i < LI_HASH_SZ; i++) {
157 		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
158 		ldi_ident_hash[i] = NULL;
159 	}
160 
161 	/*
162 	 * Initialize the LDI event subsystem
163 	 */
164 	mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
165 	cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
166 	ldi_ev_callback_list.le_busy = 0;
167 	ldi_ev_callback_list.le_thread = NULL;
168 	list_create(&ldi_ev_callback_list.le_head,
169 	    sizeof (ldi_ev_callback_impl_t),
170 	    offsetof(ldi_ev_callback_impl_t, lec_list));
171 }
172 
173 /*
174  * LDI ident manipulation functions
175  */
176 static uint_t
177 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
178 {
179 	if (dip != NULL) {
180 		uintptr_t k = (uintptr_t)dip;
181 		k >>= (int)highbit(sizeof (struct dev_info));
182 		return ((uint_t)k);
183 	} else if (dev != DDI_DEV_T_NONE) {
184 		return (modid + getminor(dev) + getmajor(dev));
185 	} else {
186 		return (modid);
187 	}
188 }
189 
190 static struct ldi_ident **
191 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
192 {
193 	struct ldi_ident	**lipp = NULL;
194 	uint_t			index = LI_HASH(modid, dip, dev);
195 
196 	ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
197 
198 	for (lipp = &(ldi_ident_hash[index]);
199 	    (*lipp != NULL);
200 	    lipp = &((*lipp)->li_next)) {
201 		if (((*lipp)->li_modid == modid) &&
202 		    ((*lipp)->li_major == major) &&
203 		    ((*lipp)->li_dip == dip) &&
204 		    ((*lipp)->li_dev == dev))
205 			break;
206 	}
207 
208 	ASSERT(lipp != NULL);
209 	return (lipp);
210 }
211 
212 static struct ldi_ident *
213 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
214 {
215 	struct ldi_ident	*lip, **lipp;
216 	modid_t			modid;
217 	uint_t			index;
218 
219 	ASSERT(mod_name != NULL);
220 
221 	/* get the module id */
222 	modid = mod_name_to_modid(mod_name);
223 	ASSERT(modid != -1);
224 
225 	/* allocate a new ident in case we need it */
226 	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
227 
228 	/* search the hash for a matching ident */
229 	index = LI_HASH(modid, dip, dev);
230 	mutex_enter(&ldi_ident_hash_lock[index]);
231 	lipp = ident_find_ref_nolock(modid, dip, dev, major);
232 
233 	if (*lipp != NULL) {
234 		/* we found an ident in the hash */
235 		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
236 		(*lipp)->li_ref++;
237 		mutex_exit(&ldi_ident_hash_lock[index]);
238 		kmem_free(lip, sizeof (struct ldi_ident));
239 		return (*lipp);
240 	}
241 
242 	/* initialize the new ident */
243 	lip->li_next = NULL;
244 	lip->li_ref = 1;
245 	lip->li_modid = modid;
246 	lip->li_major = major;
247 	lip->li_dip = dip;
248 	lip->li_dev = dev;
249 	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
250 
251 	/* add it to the ident hash */
252 	lip->li_next = ldi_ident_hash[index];
253 	ldi_ident_hash[index] = lip;
254 
255 	mutex_exit(&ldi_ident_hash_lock[index]);
256 	return (lip);
257 }
258 
259 static void
260 ident_hold(struct ldi_ident *lip)
261 {
262 	uint_t			index;
263 
264 	ASSERT(lip != NULL);
265 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
266 	mutex_enter(&ldi_ident_hash_lock[index]);
267 	ASSERT(lip->li_ref > 0);
268 	lip->li_ref++;
269 	mutex_exit(&ldi_ident_hash_lock[index]);
270 }
271 
272 static void
273 ident_release(struct ldi_ident *lip)
274 {
275 	struct ldi_ident	**lipp;
276 	uint_t			index;
277 
278 	ASSERT(lip != NULL);
279 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
280 	mutex_enter(&ldi_ident_hash_lock[index]);
281 
282 	ASSERT(lip->li_ref > 0);
283 	if (--lip->li_ref > 0) {
284 		/* there are more references to this ident */
285 		mutex_exit(&ldi_ident_hash_lock[index]);
286 		return;
287 	}
288 
289 	/* this was the last reference/open for this ident.  free it. */
290 	lipp = ident_find_ref_nolock(
291 	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
292 
293 	ASSERT((lipp != NULL) && (*lipp != NULL));
294 	*lipp = lip->li_next;
295 	mutex_exit(&ldi_ident_hash_lock[index]);
296 	kmem_free(lip, sizeof (struct ldi_ident));
297 }
298 
299 /*
300  * LDI handle manipulation functions
301  */
302 static uint_t
303 handle_hash_func(void *vp)
304 {
305 	uintptr_t k = (uintptr_t)vp;
306 	k >>= (int)highbit(sizeof (vnode_t));
307 	return ((uint_t)k);
308 }
309 
310 static struct ldi_handle **
311 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
312 {
313 	struct ldi_handle	**lhpp = NULL;
314 	uint_t			index = LH_HASH(vp);
315 
316 	ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
317 
318 	for (lhpp = &(ldi_handle_hash[index]);
319 	    (*lhpp != NULL);
320 	    lhpp = &((*lhpp)->lh_next)) {
321 		if (((*lhpp)->lh_ident == ident) &&
322 		    ((*lhpp)->lh_vp == vp))
323 			break;
324 	}
325 
326 	ASSERT(lhpp != NULL);
327 	return (lhpp);
328 }
329 
330 static struct ldi_handle *
331 handle_find(vnode_t *vp, struct ldi_ident *ident)
332 {
333 	struct ldi_handle	**lhpp;
334 	int			index = LH_HASH(vp);
335 
336 	mutex_enter(&ldi_handle_hash_lock[index]);
337 	lhpp = handle_find_ref_nolock(vp, ident);
338 	mutex_exit(&ldi_handle_hash_lock[index]);
339 	ASSERT(lhpp != NULL);
340 	return (*lhpp);
341 }
342 
343 static struct ldi_handle *
344 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
345 {
346 	struct ldi_handle	*lhp, **lhpp;
347 	uint_t			index;
348 
349 	ASSERT((vp != NULL) && (ident != NULL));
350 
351 	/* allocate a new handle in case we need it */
352 	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
353 
354 	/* search the hash for a matching handle */
355 	index = LH_HASH(vp);
356 	mutex_enter(&ldi_handle_hash_lock[index]);
357 	lhpp = handle_find_ref_nolock(vp, ident);
358 
359 	if (*lhpp != NULL) {
360 		/* we found a handle in the hash */
361 		(*lhpp)->lh_ref++;
362 		mutex_exit(&ldi_handle_hash_lock[index]);
363 
364 		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
365 		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
366 		    (void *)*lhpp, (void *)ident, (void *)vp,
367 		    mod_major_to_name(getmajor(vp->v_rdev)),
368 		    getminor(vp->v_rdev)));
369 
370 		kmem_free(lhp, sizeof (struct ldi_handle));
371 		return (*lhpp);
372 	}
373 
374 	/* initialize the new handle */
375 	lhp->lh_ref = 1;
376 	lhp->lh_vp = vp;
377 	lhp->lh_ident = ident;
378 #ifdef	LDI_OBSOLETE_EVENT
379 	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
380 #endif
381 
382 	/* set the device type for this handle */
383 	lhp->lh_type = 0;
384 	if (vp->v_stream) {
385 		ASSERT(vp->v_type == VCHR);
386 		lhp->lh_type |= LH_STREAM;
387 	} else {
388 		lhp->lh_type |= LH_CBDEV;
389 	}
390 
391 	/* get holds on other objects */
392 	ident_hold(ident);
393 	ASSERT(vp->v_count >= 1);
394 	VN_HOLD(vp);
395 
396 	/* add it to the handle hash */
397 	lhp->lh_next = ldi_handle_hash[index];
398 	ldi_handle_hash[index] = lhp;
399 	atomic_add_long(&ldi_handle_hash_count, 1);
400 
401 	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
402 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
403 	    (void *)lhp, (void *)ident, (void *)vp,
404 	    mod_major_to_name(getmajor(vp->v_rdev)),
405 	    getminor(vp->v_rdev)));
406 
407 	mutex_exit(&ldi_handle_hash_lock[index]);
408 	return (lhp);
409 }
410 
411 static void
412 handle_release(struct ldi_handle *lhp)
413 {
414 	struct ldi_handle	**lhpp;
415 	uint_t			index;
416 
417 	ASSERT(lhp != NULL);
418 
419 	index = LH_HASH(lhp->lh_vp);
420 	mutex_enter(&ldi_handle_hash_lock[index]);
421 
422 	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
423 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
424 	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
425 	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
426 	    getminor(lhp->lh_vp->v_rdev)));
427 
428 	ASSERT(lhp->lh_ref > 0);
429 	if (--lhp->lh_ref > 0) {
430 		/* there are more references to this handle */
431 		mutex_exit(&ldi_handle_hash_lock[index]);
432 		return;
433 	}
434 
435 	/* this was the last reference/open for this handle.  free it. */
436 	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
437 	ASSERT((lhpp != NULL) && (*lhpp != NULL));
438 	*lhpp = lhp->lh_next;
439 	atomic_add_long(&ldi_handle_hash_count, -1);
440 	mutex_exit(&ldi_handle_hash_lock[index]);
441 
442 	VN_RELE(lhp->lh_vp);
443 	ident_release(lhp->lh_ident);
444 #ifdef	LDI_OBSOLETE_EVENT
445 	mutex_destroy(lhp->lh_lock);
446 #endif
447 	kmem_free(lhp, sizeof (struct ldi_handle));
448 }
449 
450 #ifdef	LDI_OBSOLETE_EVENT
451 /*
452  * LDI event manipulation functions
453  */
454 static void
455 handle_event_add(ldi_event_t *lep)
456 {
457 	struct ldi_handle *lhp = lep->le_lhp;
458 
459 	ASSERT(lhp != NULL);
460 
461 	mutex_enter(lhp->lh_lock);
462 	if (lhp->lh_events == NULL) {
463 		lhp->lh_events = lep;
464 		mutex_exit(lhp->lh_lock);
465 		return;
466 	}
467 
468 	lep->le_next = lhp->lh_events;
469 	lhp->lh_events->le_prev = lep;
470 	lhp->lh_events = lep;
471 	mutex_exit(lhp->lh_lock);
472 }
473 
474 static void
475 handle_event_remove(ldi_event_t *lep)
476 {
477 	struct ldi_handle *lhp = lep->le_lhp;
478 
479 	ASSERT(lhp != NULL);
480 
481 	mutex_enter(lhp->lh_lock);
482 	if (lep->le_prev)
483 		lep->le_prev->le_next = lep->le_next;
484 	if (lep->le_next)
485 		lep->le_next->le_prev = lep->le_prev;
486 	if (lhp->lh_events == lep)
487 		lhp->lh_events = lep->le_next;
488 	mutex_exit(lhp->lh_lock);
489 
490 }
491 
492 static void
493 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
494     void *arg, void *bus_impldata)
495 {
496 	ldi_event_t *lep = (ldi_event_t *)arg;
497 
498 	ASSERT(lep != NULL);
499 
500 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
501 	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
502 	    (void *)dip, (void *)event_cookie, (void *)lep));
503 
504 	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
505 }
506 #endif
507 
508 /*
509  * LDI open helper functions
510  */
511 
512 /* get a vnode to a device by dev_t and otyp */
513 static int
514 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
515 {
516 	dev_info_t		*dip;
517 	vnode_t			*vp;
518 
519 	/* sanity check required input parameters */
520 	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
521 		return (EINVAL);
522 
523 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
524 		return (ENODEV);
525 
526 	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
527 	spec_assoc_vp_with_devi(vp, dip);
528 	ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
529 
530 	*vpp = vp;
531 	return (0);
532 }
533 
534 /* get a vnode to a device by pathname */
535 int
536 ldi_vp_from_name(char *path, vnode_t **vpp)
537 {
538 	vnode_t			*vp = NULL;
539 	int			ret;
540 
541 	/* sanity check required input parameters */
542 	if ((path == NULL) || (vpp == NULL))
543 		return (EINVAL);
544 
545 	if (modrootloaded) {
546 		cred_t *saved_cred = curthread->t_cred;
547 
548 		/* we don't want lookupname to fail because of credentials */
549 		curthread->t_cred = kcred;
550 
551 		/*
552 		 * all lookups should be done in the global zone.  but
553 		 * lookupnameat() won't actually do this if an absolute
554 		 * path is passed in.  since the ldi interfaces require an
555 		 * absolute path we pass lookupnameat() a pointer to
556 		 * the character after the leading '/' and tell it to
557 		 * start searching at the current system root directory.
558 		 */
559 		ASSERT(*path == '/');
560 		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
561 		    &vp, rootdir);
562 
563 		/* restore this thread's credentials */
564 		curthread->t_cred = saved_cred;
565 
566 		if (ret == 0) {
567 			if (!vn_matchops(vp, spec_getvnodeops()) ||
568 			    !VTYP_VALID(vp->v_type)) {
569 				VN_RELE(vp);
570 				return (ENXIO);
571 			}
572 		}
573 	}
574 
575 	if (vp == NULL) {
576 		dev_info_t	*dip;
577 		dev_t		dev;
578 		int		spec_type;
579 
580 		/*
581 		 * Root is not mounted, the minor node is not specified,
582 		 * or an OBP path has been specified.
583 		 */
584 
585 		/*
586 		 * Determine if path can be pruned to produce an
587 		 * OBP or devfs path for resolve_pathname.
588 		 */
589 		if (strncmp(path, "/devices/", 9) == 0)
590 			path += strlen("/devices");
591 
592 		/*
593 		 * if no minor node was specified the DEFAULT minor node
594 		 * will be returned.  if there is no DEFAULT minor node
595 		 * one will be fabricated of type S_IFCHR with the minor
596 		 * number equal to the instance number.
597 		 */
598 		ret = resolve_pathname(path, &dip, &dev, &spec_type);
599 		if (ret != 0)
600 			return (ENODEV);
601 
602 		ASSERT(STYP_VALID(spec_type));
603 		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
604 		spec_assoc_vp_with_devi(vp, dip);
605 		ddi_release_devi(dip);
606 	}
607 
608 	*vpp = vp;
609 	return (0);
610 }
611 
612 static int
613 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
614 {
615 	char		*devidstr;
616 	ddi_prop_t	*propp;
617 
618 	/* encode the devid as a string property */
619 	if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
620 		return (0);
621 
622 	/*
623 	 * Search for the devid.  For speed and ease in locking this
624 	 * code directly uses the property implementation.  See
625 	 * ddi_common_devid_to_devlist() for a comment as to why.
626 	 */
627 	mutex_enter(&(DEVI(dip)->devi_lock));
628 
629 	/* check if there is a DDI_DEV_T_NONE devid property */
630 	propp = i_ddi_prop_search(DDI_DEV_T_NONE,
631 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
632 	if (propp != NULL) {
633 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
634 			/* a DDI_DEV_T_NONE devid exists and matches */
635 			mutex_exit(&(DEVI(dip)->devi_lock));
636 			ddi_devid_str_free(devidstr);
637 			return (1);
638 		} else {
639 			/* a DDI_DEV_T_NONE devid exists and doesn't match */
640 			mutex_exit(&(DEVI(dip)->devi_lock));
641 			ddi_devid_str_free(devidstr);
642 			return (0);
643 		}
644 	}
645 
646 	/* check if there is a devt specific devid property */
647 	propp = i_ddi_prop_search(dev,
648 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
649 	if (propp != NULL) {
650 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
651 			/* a devt specific devid exists and matches */
652 			mutex_exit(&(DEVI(dip)->devi_lock));
653 			ddi_devid_str_free(devidstr);
654 			return (1);
655 		} else {
656 			/* a devt specific devid exists and doesn't match */
657 			mutex_exit(&(DEVI(dip)->devi_lock));
658 			ddi_devid_str_free(devidstr);
659 			return (0);
660 		}
661 	}
662 
663 	/* we didn't find any devids associated with the device */
664 	mutex_exit(&(DEVI(dip)->devi_lock));
665 	ddi_devid_str_free(devidstr);
666 	return (0);
667 }
668 
669 /* get a handle to a device by devid and minor name */
670 int
671 ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
672 {
673 	dev_info_t		*dip;
674 	vnode_t			*vp;
675 	int			ret, i, ndevs, styp;
676 	dev_t			dev, *devs;
677 
678 	/* sanity check required input parameters */
679 	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
680 		return (EINVAL);
681 
682 	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
683 	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
684 		return (ENODEV);
685 
686 	for (i = 0; i < ndevs; i++) {
687 		dev = devs[i];
688 
689 		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
690 			continue;
691 
692 		/*
693 		 * now we have to verify that the devid of the disk
694 		 * still matches what was requested.
695 		 *
696 		 * we have to do this because the devid could have
697 		 * changed between the call to ddi_lyr_devid_to_devlist()
698 		 * and e_ddi_hold_devi_by_dev().  this is because when
699 		 * ddi_lyr_devid_to_devlist() returns a list of devts
700 		 * there is no kind of hold on those devts so a device
701 		 * could have been replaced out from under us in the
702 		 * interim.
703 		 */
704 		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
705 		    NULL, &styp) == DDI_SUCCESS) &&
706 		    ldi_devid_match(devid, dip, dev))
707 			break;
708 
709 		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
710 	}
711 
712 	ddi_lyr_free_devlist(devs, ndevs);
713 
714 	if (i == ndevs)
715 		return (ENODEV);
716 
717 	ASSERT(STYP_VALID(styp));
718 	vp = makespecvp(dev, STYP_TO_VTYP(styp));
719 	spec_assoc_vp_with_devi(vp, dip);
720 	ddi_release_devi(dip);		/* from e_ddi_hold_devi_by_dev */
721 
722 	*vpp = vp;
723 	return (0);
724 }
725 
726 /* given a vnode, open a device */
727 static int
728 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
729     ldi_handle_t *lhp, struct ldi_ident *li)
730 {
731 	struct ldi_handle	*nlhp;
732 	vnode_t			*vp;
733 	int			err;
734 
735 	ASSERT((vpp != NULL) && (*vpp != NULL));
736 	ASSERT((lhp != NULL) && (li != NULL));
737 
738 	vp = *vpp;
739 	/* if the vnode passed in is not a device, then bail */
740 	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
741 		return (ENXIO);
742 
743 	/*
744 	 * the caller may have specified a node that
745 	 * doesn't have cb_ops defined.  the ldi doesn't yet
746 	 * support opening devices without a valid cb_ops.
747 	 */
748 	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
749 		return (ENXIO);
750 
751 	/* open the device */
752 	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
753 		return (err);
754 
755 	/* possible clone open, make sure that we still have a spec node */
756 	ASSERT(vn_matchops(vp, spec_getvnodeops()));
757 
758 	nlhp = handle_alloc(vp, li);
759 
760 	if (vp != *vpp) {
761 		/*
762 		 * allocating the layered handle took a new hold on the vnode
763 		 * so we can release the hold that was returned by the clone
764 		 * open
765 		 */
766 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
767 		    "ldi clone open", (void *)nlhp));
768 	} else {
769 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
770 		    "ldi open", (void *)nlhp));
771 	}
772 
773 	*vpp = vp;
774 	*lhp = (ldi_handle_t)nlhp;
775 	return (0);
776 }
777 
778 /* Call a driver's prop_op(9E) interface */
779 static int
780 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
781     int flags, char *name, caddr_t valuep, int *lengthp)
782 {
783 	struct dev_ops	*ops = NULL;
784 	int		res;
785 
786 	ASSERT((dip != NULL) && (name != NULL));
787 	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
788 	ASSERT(lengthp != NULL);
789 
790 	/*
791 	 * we can only be invoked after a driver has been opened and
792 	 * someone has a layered handle to it, so there had better be
793 	 * a valid ops vector.
794 	 */
795 	ops = DEVI(dip)->devi_ops;
796 	ASSERT(ops && ops->devo_cb_ops);
797 
798 	/*
799 	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
800 	 * nulldev or even NULL.
801 	 */
802 	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
803 	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
804 	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
805 		return (DDI_PROP_NOT_FOUND);
806 	}
807 
808 	/* check if this is actually a DDI_DEV_T_ANY query */
809 	if (flags & LDI_DEV_T_ANY) {
810 		flags &= ~LDI_DEV_T_ANY;
811 		dev = DDI_DEV_T_ANY;
812 	}
813 
814 	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
815 	return (res);
816 }
817 
818 static void
819 i_ldi_prop_op_free(struct prop_driver_data *pdd)
820 {
821 	kmem_free(pdd, pdd->pdd_size);
822 }
823 
824 static caddr_t
825 i_ldi_prop_op_alloc(int prop_len)
826 {
827 	struct prop_driver_data	*pdd;
828 	int			pdd_size;
829 
830 	pdd_size = sizeof (struct prop_driver_data) + prop_len;
831 	pdd = kmem_alloc(pdd_size, KM_SLEEP);
832 	pdd->pdd_size = pdd_size;
833 	pdd->pdd_prop_free = i_ldi_prop_op_free;
834 	return ((caddr_t)&pdd[1]);
835 }
836 
837 /*
838  * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
839  * by the typed ldi property lookup interfaces.
840  */
841 static int
842 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
843     caddr_t *datap, int *lengthp, int elem_size)
844 {
845 	caddr_t	prop_val;
846 	int	prop_len, res;
847 
848 	ASSERT((dip != NULL) && (name != NULL));
849 	ASSERT((datap != NULL) && (lengthp != NULL));
850 
851 	/*
852 	 * first call the driver's prop_op() interface to allow it
853 	 * to override default property values.
854 	 */
855 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
856 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
857 	if (res != DDI_PROP_SUCCESS)
858 		return (DDI_PROP_NOT_FOUND);
859 
860 	/* sanity check the property length */
861 	if (prop_len == 0) {
862 		/*
863 		 * the ddi typed interfaces don't allow a driver to
864 		 * create properties with a length of 0.  so we should
865 		 * prevent drivers from returning 0 length dynamic
866 		 * properties for typed property lookups.
867 		 */
868 		return (DDI_PROP_NOT_FOUND);
869 	}
870 
871 	/* sanity check the property length against the element size */
872 	if (elem_size && ((prop_len % elem_size) != 0))
873 		return (DDI_PROP_NOT_FOUND);
874 
875 	/*
876 	 * got it.  now allocate a prop_driver_data struct so that the
877 	 * user can free the property via ddi_prop_free().
878 	 */
879 	prop_val = i_ldi_prop_op_alloc(prop_len);
880 
881 	/* lookup the property again, this time get the value */
882 	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
883 	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
884 	if (res != DDI_PROP_SUCCESS) {
885 		ddi_prop_free(prop_val);
886 		return (DDI_PROP_NOT_FOUND);
887 	}
888 
889 	/* sanity check the property length */
890 	if (prop_len == 0) {
891 		ddi_prop_free(prop_val);
892 		return (DDI_PROP_NOT_FOUND);
893 	}
894 
895 	/* sanity check the property length against the element size */
896 	if (elem_size && ((prop_len % elem_size) != 0)) {
897 		ddi_prop_free(prop_val);
898 		return (DDI_PROP_NOT_FOUND);
899 	}
900 
901 	/*
902 	 * return the prop_driver_data struct and, optionally, the length
903 	 * of the data.
904 	 */
905 	*datap = prop_val;
906 	*lengthp = prop_len;
907 
908 	return (DDI_PROP_SUCCESS);
909 }
910 
911 /*
912  * i_check_string looks at a string property and makes sure it's
913  * a valid null-terminated string
914  */
915 static int
916 i_check_string(char *str, int prop_len)
917 {
918 	int i;
919 
920 	ASSERT(str != NULL);
921 
922 	for (i = 0; i < prop_len; i++) {
923 		if (str[i] == '\0')
924 			return (0);
925 	}
926 	return (1);
927 }
928 
929 /*
930  * i_pack_string_array takes a string array property that is represented
931  * as a concatenation of strings (with the NULL character included for
932  * each string) and converts it into a format that can be returned by
933  * ldi_prop_lookup_string_array.
934  */
935 static int
936 i_pack_string_array(char *str_concat, int prop_len,
937     char ***str_arrayp, int *nelemp)
938 {
939 	int i, nelem, pack_size;
940 	char **str_array, *strptr;
941 
942 	/*
943 	 * first we need to sanity check the input string array.
944 	 * in essence this can be done by making sure that the last
945 	 * character of the array passed in is null (meaning the last
946 	 * string in the array is NULL terminated).
947 	 */
948 	if (str_concat[prop_len - 1] != '\0')
949 		return (1);
950 
951 	/* now let's count the number of strings in the array */
952 	for (nelem = i = 0; i < prop_len; i++)
953 		if (str_concat[i] == '\0')
954 			nelem++;
955 	ASSERT(nelem >= 1);
956 
957 	/* now let's allocate memory for the new packed property */
958 	pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
959 	str_array = (char **)i_ldi_prop_op_alloc(pack_size);
960 
961 	/* let's copy the actual string data into the new property */
962 	strptr = (char *)&(str_array[nelem + 1]);
963 	bcopy(str_concat, strptr, prop_len);
964 
965 	/* now initialize the string array pointers */
966 	for (i = 0; i < nelem; i++) {
967 		str_array[i] = strptr;
968 		strptr += strlen(strptr) + 1;
969 	}
970 	str_array[nelem] = NULL;
971 
972 	/* set the return values */
973 	*str_arrayp = str_array;
974 	*nelemp = nelem;
975 
976 	return (0);
977 }
978 
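/*
 * Editor's illustration (not part of the original source): for a
 * hypothetical input of str_concat = "foo\0bar\0" with prop_len = 8,
 * i_pack_string_array() would return nelem = 2 and a single allocation
 * laid out roughly as follows:
 *
 *	str_array[0] -> "foo"	(points into the copied string data)
 *	str_array[1] -> "bar"
 *	str_array[2] = NULL	(terminator)
 *	... followed by the prop_len bytes of string data copied by bcopy()
 *
 * The buffer is fronted by a prop_driver_data header (allocated via
 * i_ldi_prop_op_alloc()) so a consumer can release it with ddi_prop_free().
 */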
979 
980 /*
981  * LDI Project private device usage interfaces
982  */
983 
984 /*
985  * Get a count of how many devices are currently open by different consumers
986  */
987 int
988 ldi_usage_count()
989 {
990 	return (ldi_handle_hash_count);
991 }
992 
993 static void
994 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
995 {
996 	dev_info_t	*dip;
997 	dev_t		dev;
998 
999 	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1000 
1001 	/* get the target devt */
1002 	dev = vp->v_rdev;
1003 
1004 	/* try to get the target dip */
1005 	dip = VTOCS(vp)->s_dip;
1006 	if (dip != NULL) {
1007 		e_ddi_hold_devi(dip);
1008 	} else if (dev != DDI_DEV_T_NONE) {
1009 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1010 	}
1011 
1012 	/* set the target information */
1013 	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1014 	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1015 	ldi_usage->tgt_devt = dev;
1016 	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1017 	ldi_usage->tgt_dip = dip;
1018 }
1019 
1020 
1021 static int
1022 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1023     void *arg, int (*callback)(const ldi_usage_t *, void *))
1024 {
1025 	ldi_usage_t	ldi_usage;
1026 	struct devnames	*dnp;
1027 	dev_info_t	*dip;
1028 	major_t		major;
1029 	dev_t		dev;
1030 	int		ret = LDI_USAGE_CONTINUE;
1031 
1032 	/* set the target device information */
1033 	ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1034 
1035 	/* get the source devt */
1036 	dev = lip->li_dev;
1037 
1038 	/* try to get the source dip */
1039 	dip = lip->li_dip;
1040 	if (dip != NULL) {
1041 		e_ddi_hold_devi(dip);
1042 	} else if (dev != DDI_DEV_T_NONE) {
1043 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1044 	}
1045 
1046 	/* set the valid source information */
1047 	ldi_usage.src_modid = lip->li_modid;
1048 	ldi_usage.src_name = lip->li_modname;
1049 	ldi_usage.src_devt = dev;
1050 	ldi_usage.src_dip = dip;
1051 
1052 	/*
1053 	 * if the source ident represents either:
1054 	 *
1055 	 * - a kernel module (and not a device or device driver)
1056 	 * - a device node
1057 	 *
1058 	 * then we currently have all the info we need to report the
1059 	 * usage information so invoke the callback function.
1060 	 */
1061 	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1062 	    (dip != NULL)) {
1063 		ret = callback(&ldi_usage, arg);
1064 		if (dip != NULL)
1065 			ddi_release_devi(dip);
1066 		if (ldi_usage.tgt_dip != NULL)
1067 			ddi_release_devi(ldi_usage.tgt_dip);
1068 		return (ret);
1069 	}
1070 
1071 	/*
1072 	 * now this is kinda gross.
1073 	 *
1074 	 * what we do here is attempt to associate every device instance
1075 	 * of the source driver on the system with the open target driver.
1076 	 * we do this because we don't know which instance of the device
1077 	 * could potentially access the lower device so we assume that all
1078 	 * the instances could access it.
1079 	 *
1080 	 * there are two ways we could have gotten here:
1081 	 *
1082 	 * 1) this layered ident represents one created using only a
1083 	 *    major number or a driver module name.  this means that when
1084 	 *    it was created we could not associate it with a particular
1085 	 *    dev_t or device instance.
1086 	 *
1087 	 *    when could this possibly happen you ask?
1088 	 *
1089 	 *    a perfect example of this is streams persistent links.
1090 	 *    when a persistent streams link is formed we can't associate
1091 	 *    the lower device stream with any particular upper device
1092 	 *    stream or instance.  this is because any particular upper
1093 	 *    device stream could be closed, then another could be
1094 	 *    opened with a different dev_t and device instance, and it
1095 	 *    would still have access to the lower linked stream.
1096 	 *
1097 	 *    since any instance of the upper streams driver could
1098 	 *    potentially access the lower stream whenever it wants,
1099 	 *    we represent that here by associating the opened lower
1100 	 *    device with every existing device instance of the upper
1101 	 *    streams driver.
1102 	 *
1103 	 * 2) This case should really never happen but we'll include it
1104 	 *    for completeness.
1105 	 *
1106 	 *    it's possible that we could have gotten here because we
1107 	 *    have a dev_t for the upper device but we couldn't find a
1108 	 *    dip associated with that dev_t.
1109 	 *
1110 	 *    the only types of devices that have dev_t without an
1111 	 *    associated dip are unbound DLPIv2 network devices.  These
1112 	 *    types of devices exist to be able to attach a stream to any
1113 	 *    instance of a hardware network device.  since these types of
1114 	 *    devices are usually hardware devices they should never
1115 	 *    really have other devices open.
1116 	 */
1117 	if (dev != DDI_DEV_T_NONE)
1118 		major = getmajor(dev);
1119 	else
1120 		major = lip->li_major;
1121 
1122 	ASSERT((major >= 0) && (major < devcnt));
1123 
1124 	dnp = &devnamesp[major];
1125 	LOCK_DEV_OPS(&dnp->dn_lock);
1126 	dip = dnp->dn_head;
1127 	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1128 		e_ddi_hold_devi(dip);
1129 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1130 
1131 		/* set the source dip */
1132 		ldi_usage.src_dip = dip;
1133 
1134 		/* invoke the callback function */
1135 		ret = callback(&ldi_usage, arg);
1136 
1137 		LOCK_DEV_OPS(&dnp->dn_lock);
1138 		ddi_release_devi(dip);
1139 		dip = ddi_get_next(dip);
1140 	}
1141 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1142 
1143 	/* if there was a target dip, release it */
1144 	if (ldi_usage.tgt_dip != NULL)
1145 		ddi_release_devi(ldi_usage.tgt_dip);
1146 
1147 	return (ret);
1148 }
1149 
1150 /*
1151  * ldi_usage_walker() - this walker reports LDI kernel device usage
1152  * information via the supplied callback() function.  the LDI keeps track
1153  * of what devices are being accessed in its own internal data structures.
1154  * this function walks those data structures to determine device usage.
1155  */
1156 void
1157 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1158 {
1159 	struct ldi_handle	*lhp;
1160 	struct ldi_ident	*lip;
1161 	vnode_t			*vp;
1162 	int			i;
1163 	int			ret = LDI_USAGE_CONTINUE;
1164 
1165 	for (i = 0; i < LH_HASH_SZ; i++) {
1166 		mutex_enter(&ldi_handle_hash_lock[i]);
1167 
1168 		lhp = ldi_handle_hash[i];
1169 		while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1170 			lip = lhp->lh_ident;
1171 			vp = lhp->lh_vp;
1172 
1173 			/* invoke the devinfo callback function */
1174 			ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1175 
1176 			lhp = lhp->lh_next;
1177 		}
1178 		mutex_exit(&ldi_handle_hash_lock[i]);
1179 
1180 		if (ret != LDI_USAGE_CONTINUE)
1181 			break;
1182 	}
1183 }
1184 
1185 /*
1186  * LDI Project private interfaces (streams linking interfaces)
1187  *
1188  * Streams supports a type of built in device layering via linking.
1189  * Certain types of streams drivers can be streams multiplexors.
1190  * A streams multiplexor supports the I_LINK/I_PLINK operation.
1191  * These operations allow other streams devices to be linked under the
1192  * multiplexor.  By definition all streams multiplexors are devices
1193  * so this linking is a type of device layering where the multiplexor
1194  * device is layered on top of the device linked below it.
1195  */
1196 
1197 /*
1198  * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1199  * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1200  *
1201  * The streams framework keeps track of links via the file_t of the lower
1202  * stream.  The LDI keeps track of devices using a vnode.  In the case
1203  * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1204  * a file_t that the streams framework can use to track the linkage.
1205  */
1206 int
1207 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1208 {
1209 	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
1210 	vnode_t			*vpdown;
1211 	file_t			*fpdown;
1212 	int			err;
1213 
1214 	if (lhp == NULL)
1215 		return (EINVAL);
1216 
1217 	vpdown = lhp->lh_vp;
1218 	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1219 	ASSERT(cmd == _I_PLINK_LH);
1220 
1221 	/*
1222 	 * create a new lower vnode and a file_t that points to it;
1223 	 * streams linking requires a file_t.  falloc() returns with
1224 	 * fpdown locked.
1225 	 */
1226 	VN_HOLD(vpdown);
1227 	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1228 	mutex_exit(&fpdown->f_tlock);
1229 
1230 	/* try to establish the link */
1231 	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1232 
1233 	if (err != 0) {
1234 		/* the link failed, free the file_t and release the vnode */
1235 		mutex_enter(&fpdown->f_tlock);
1236 		unfalloc(fpdown);
1237 		VN_RELE(vpdown);
1238 	}
1239 
1240 	return (err);
1241 }
1242 
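/*
 * Editor's illustration (not part of the original source): a kernel
 * consumer does not call ldi_mlink_lh() directly.  Instead it issues an
 * I_PLINK ioctl with FKIOCTL set on a layered handle to the multiplexor,
 * passing the layered handle of the lower device as the argument;
 * ldi_ioctl() translates this into _I_PLINK_LH and the stream head
 * eventually invokes ldi_mlink_lh().  A minimal sketch (error handling
 * omitted; mux_lh and lower_lh are assumed to be open layered handles):
 *
 *	int	rval;
 *
 *	err = ldi_ioctl(mux_lh, I_PLINK, (intptr_t)lower_lh,
 *	    FREAD | FWRITE | FKIOCTL, kcred, &rval);
 */
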
1243 /*
1244  * ldi_mlink_fp() is invoked for all successful streams linkages created
1245  * via I_LINK and I_PLINK.  ldi_mlink_fp() records the linkage information
1246  * in its internal state so that the devinfo snapshot code has some
1247  * observability into streams device linkage information.
1248  */
1249 void
1250 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1251 {
1252 	vnode_t			*vp = fpdown->f_vnode;
1253 	struct snode		*sp, *csp;
1254 	ldi_ident_t		li;
1255 	major_t			major;
1256 	int			ret;
1257 
1258 	/* if the lower stream is not a device then return */
1259 	if (!vn_matchops(vp, spec_getvnodeops()))
1260 		return;
1261 
1262 	ASSERT(!servicing_interrupt());
1263 
1264 	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1265 	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1266 	    (void *)stp, (void *)fpdown));
1267 
1268 	sp = VTOS(vp);
1269 	csp = VTOS(sp->s_commonvp);
1270 
1271 	/* check if this was a plink via a layered handle */
1272 	if (lhlink) {
1273 		/*
1274 		 * increment the common snode s_count.
1275 		 *
1276 		 * this is done because after the link operation there
1277 		 * are two ways that s_count can be decremented.
1278 		 *
1279 		 * when the layered handle used to create the link is
1280 		 * closed, spec_close() is called and it will decrement
1281 		 * s_count in the common snode.  if we don't increment
1282 		 * s_count here then this could cause spec_close() to
1283 		 * actually close the device while it's still linked
1284 		 * under a multiplexer.
1285 		 *
1286 		 * also, when the lower stream is unlinked, closef() is
1287 		 * called for the file_t associated with this snode.
1288 		 * closef() will call spec_close(), which will decrement
1289 		 * s_count.  if we don't increment s_count here then this
1290 		 * could cause spec_close() to actually close the device
1291 		 * while there may still be valid layered handles
1292 		 * pointing to it.
1293 		 */
1294 		mutex_enter(&csp->s_lock);
1295 		ASSERT(csp->s_count >= 1);
1296 		csp->s_count++;
1297 		mutex_exit(&csp->s_lock);
1298 
1299 		/*
1300 		 * decrement the f_count.
1301 		 * this is done because the layered driver framework does
1302 		 * not actually cache a copy of the file_t allocated to
1303 		 * do the link.  this is done here instead of in ldi_mlink_lh()
1304 		 * because there is a window in ldi_mlink_lh() between where
1305 		 * mlink_file() returns and we would decrement the f_count
1306 		 * when the stream could be unlinked.
1307 		 */
1308 		mutex_enter(&fpdown->f_tlock);
1309 		fpdown->f_count--;
1310 		mutex_exit(&fpdown->f_tlock);
1311 	}
1312 
1313 	/*
1314 	 * NOTE: here we rely on the streams subsystem not allowing
1315 	 * a stream to be multiplexed more than once.  if this
1316 	 * changes, we break.
1317 	 *
1318 	 * mark the snode/stream as multiplexed
1319 	 */
1320 	mutex_enter(&sp->s_lock);
1321 	ASSERT(!(sp->s_flag & SMUXED));
1322 	sp->s_flag |= SMUXED;
1323 	mutex_exit(&sp->s_lock);
1324 
1325 	/* get a layered ident for the upper stream */
1326 	if (type == LINKNORMAL) {
1327 		/*
1328 		 * if the link is not persistent then we can associate
1329 		 * the upper stream with a dev_t.  this is because the
1330 		 * upper stream is associated with a vnode, which is
1331 		 * associated with a dev_t and this binding can't change
1332 		 * during the life of the stream.  since the link isn't
1333 		 * persistent once the stream is destroyed the link is
1334 		 * destroyed.  so the dev_t will be valid for the life
1335 		 * of the link.
1336 		 */
1337 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1338 	} else {
1339 		/*
1340 		 * if the link is persistent we can only associate the
1341 		 * link with a driver (and not a dev_t.)  this is
1342 		 * because subsequent opens of the upper device may result
1343 		 * in a different stream (and dev_t) having access to
1344 		 * the lower stream.
1345 		 *
1346 		 * for example, if the upper stream is closed after the
1347 		 * persistent link operation is completed, a subsequent
1348 		 * open of the upper device will create a new stream which
1349 		 * may have a different dev_t and an unlink operation
1350 		 * can be performed using this new upper stream.
1351 		 */
1352 		ASSERT(type == LINKPERSIST);
1353 		major = getmajor(stp->sd_vnode->v_rdev);
1354 		ret = ldi_ident_from_major(major, &li);
1355 	}
1356 
1357 	ASSERT(ret == 0);
1358 	(void) handle_alloc(vp, (struct ldi_ident *)li);
1359 	ldi_ident_release(li);
1360 }
1361 
1362 void
1363 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1364 {
1365 	struct ldi_handle	*lhp;
1366 	vnode_t			*vp = (vnode_t *)fpdown->f_vnode;
1367 	struct snode		*sp;
1368 	ldi_ident_t		li;
1369 	major_t			major;
1370 	int			ret;
1371 
1372 	/* if the lower stream is not a device then return */
1373 	if (!vn_matchops(vp, spec_getvnodeops()))
1374 		return;
1375 
1376 	ASSERT(!servicing_interrupt());
1377 	ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1378 
1379 	LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1380 	    "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1381 	    (void *)stp, (void *)fpdown));
1382 
1383 	/*
1384 	 * NOTE: here we rely on the streams subsystem not allowing
1385 	 * a stream to be multiplexed more than once.  if this
1386 	 * changes, we break.
1387 	 *
1388 	 * mark the snode/stream as not multiplexed
1389 	 */
1390 	sp = VTOS(vp);
1391 	mutex_enter(&sp->s_lock);
1392 	ASSERT(sp->s_flag & SMUXED);
1393 	sp->s_flag &= ~SMUXED;
1394 	mutex_exit(&sp->s_lock);
1395 
1396 	/*
1397 	 * clear the owner for this snode
1398 	 * see the comment in ldi_mlink_fp() for information about how
1399 	 * the ident is allocated
1400 	 */
1401 	if (type == LINKNORMAL) {
1402 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1403 	} else {
1404 		ASSERT(type == LINKPERSIST);
1405 		major = getmajor(stp->sd_vnode->v_rdev);
1406 		ret = ldi_ident_from_major(major, &li);
1407 	}
1408 
1409 	ASSERT(ret == 0);
1410 	lhp = handle_find(vp, (struct ldi_ident *)li);
1411 	handle_release(lhp);
1412 	ldi_ident_release(li);
1413 }
1414 
1415 /*
1416  * LDI Consolidation private interfaces
1417  */
1418 int
1419 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1420 {
1421 	struct modctl		*modp;
1422 	major_t			major;
1423 	char			*name;
1424 
1425 	if ((modlp == NULL) || (lip == NULL))
1426 		return (EINVAL);
1427 
1428 	ASSERT(!servicing_interrupt());
1429 
1430 	modp = mod_getctl(modlp);
1431 	if (modp == NULL)
1432 		return (EINVAL);
1433 	name = modp->mod_modname;
1434 	if (name == NULL)
1435 		return (EINVAL);
1436 	major = mod_name_to_major(name);
1437 
1438 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1439 
1440 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1441 	    "ldi_ident_from_mod", (void *)*lip, name));
1442 
1443 	return (0);
1444 }
1445 
1446 ldi_ident_t
1447 ldi_ident_from_anon()
1448 {
1449 	ldi_ident_t	lip;
1450 
1451 	ASSERT(!servicing_interrupt());
1452 
1453 	lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1454 
1455 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1456 	    "ldi_ident_from_anon", (void *)lip, "genunix"));
1457 
1458 	return (lip);
1459 }
1460 
1461 
1462 /*
1463  * LDI Public interfaces
1464  */
1465 int
1466 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1467 {
1468 	struct stdata		*stp;
1469 	dev_t			dev;
1470 	char			*name;
1471 
1472 	if ((sq == NULL) || (lip == NULL))
1473 		return (EINVAL);
1474 
1475 	ASSERT(!servicing_interrupt());
1476 
1477 	stp = sq->q_stream;
1478 	if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1479 		return (EINVAL);
1480 
1481 	dev = stp->sd_vnode->v_rdev;
1482 	name = mod_major_to_name(getmajor(dev));
1483 	if (name == NULL)
1484 		return (EINVAL);
1485 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1486 
1487 	LDI_ALLOCFREE((CE_WARN,
1488 	    "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1489 	    "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1490 	    (void *)stp));
1491 
1492 	return (0);
1493 }
1494 
1495 int
1496 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1497 {
1498 	char			*name;
1499 
1500 	if (lip == NULL)
1501 		return (EINVAL);
1502 
1503 	ASSERT(!servicing_interrupt());
1504 
1505 	name = mod_major_to_name(getmajor(dev));
1506 	if (name == NULL)
1507 		return (EINVAL);
1508 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1509 
1510 	LDI_ALLOCFREE((CE_WARN,
1511 	    "%s: li=0x%p, mod=%s, minor=0x%x",
1512 	    "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1513 
1514 	return (0);
1515 }
1516 
1517 int
1518 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1519 {
1520 	struct dev_info		*devi = (struct dev_info *)dip;
1521 	char			*name;
1522 
1523 	if ((dip == NULL) || (lip == NULL))
1524 		return (EINVAL);
1525 
1526 	ASSERT(!servicing_interrupt());
1527 
1528 	name = mod_major_to_name(devi->devi_major);
1529 	if (name == NULL)
1530 		return (EINVAL);
1531 	*lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1532 
1533 	LDI_ALLOCFREE((CE_WARN,
1534 	    "%s: li=0x%p, mod=%s, dip=0x%p",
1535 	    "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1536 
1537 	return (0);
1538 }
1539 
1540 int
1541 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1542 {
1543 	char			*name;
1544 
1545 	if (lip == NULL)
1546 		return (EINVAL);
1547 
1548 	ASSERT(!servicing_interrupt());
1549 
1550 	name = mod_major_to_name(major);
1551 	if (name == NULL)
1552 		return (EINVAL);
1553 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1554 
1555 	LDI_ALLOCFREE((CE_WARN,
1556 	    "%s: li=0x%p, mod=%s",
1557 	    "ldi_ident_from_major", (void *)*lip, name));
1558 
1559 	return (0);
1560 }
1561 
1562 void
1563 ldi_ident_release(ldi_ident_t li)
1564 {
1565 	struct ldi_ident	*ident = (struct ldi_ident *)li;
1566 	char			*name;
1567 
1568 	if (li == NULL)
1569 		return;
1570 
1571 	ASSERT(!servicing_interrupt());
1572 
1573 	name = ident->li_modname;
1574 
1575 	LDI_ALLOCFREE((CE_WARN,
1576 	    "%s: li=0x%p, mod=%s",
1577 	    "ldi_ident_release", (void *)li, name));
1578 
1579 	ident_release((struct ldi_ident *)li);
1580 }
1581 
1582 /* get a handle to a device by dev_t and otyp */
1583 int
1584 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1585     ldi_handle_t *lhp, ldi_ident_t li)
1586 {
1587 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1588 	int			ret;
1589 	vnode_t			*vp;
1590 
1591 	/* sanity check required input parameters */
1592 	if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1593 	    (lhp == NULL) || (lip == NULL))
1594 		return (EINVAL);
1595 
1596 	ASSERT(!servicing_interrupt());
1597 
1598 	if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1599 		return (ret);
1600 
1601 	if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1602 		*devp = vp->v_rdev;
1603 	}
1604 	VN_RELE(vp);
1605 
1606 	return (ret);
1607 }
1608 
1609 /* get a handle to a device by pathname */
1610 int
1611 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1612     ldi_handle_t *lhp, ldi_ident_t li)
1613 {
1614 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1615 	int			ret;
1616 	vnode_t			*vp;
1617 
1618 	/* sanity check required input parameters */
1619 	if ((pathname == NULL) || (*pathname != '/') ||
1620 	    (cr == NULL) || (lhp == NULL) || (lip == NULL))
1621 		return (EINVAL);
1622 
1623 	ASSERT(!servicing_interrupt());
1624 
1625 	if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1626 		return (ret);
1627 
1628 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1629 	VN_RELE(vp);
1630 
1631 	return (ret);
1632 }
1633 
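/*
 * Editor's illustration (not part of the original source): a typical
 * layered consumer pairs the ident and open interfaces above roughly as
 * follows (a sketch only; the device path is hypothetical and error
 * handling is abbreviated):
 *
 *	ldi_ident_t	li;
 *	ldi_handle_t	lh;
 *	int		err;
 *
 *	if ((err = ldi_ident_from_dip(dip, &li)) != 0)
 *		return (err);
 *	if ((err = ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD, kcred,
 *	    &lh, li)) != 0) {
 *		ldi_ident_release(li);
 *		return (err);
 *	}
 *	... perform I/O via ldi_read(), ldi_ioctl(), ldi_prop_*(), etc. ...
 *	(void) ldi_close(lh, FREAD, kcred);
 *	ldi_ident_release(li);
 */
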
1634 /* get a handle to a device by devid and minor_name */
1635 int
1636 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1637     int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1638 {
1639 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1640 	int			ret;
1641 	vnode_t			*vp;
1642 
1643 	/* sanity check required input parameters */
1644 	if ((minor_name == NULL) || (cr == NULL) ||
1645 	    (lhp == NULL) || (lip == NULL))
1646 		return (EINVAL);
1647 
1648 	ASSERT(!servicing_interrupt());
1649 
1650 	if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1651 		return (ret);
1652 
1653 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1654 	VN_RELE(vp);
1655 
1656 	return (ret);
1657 }
1658 
1659 int
1660 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1661 {
1662 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1663 	struct ldi_event	*lep;
1664 	int			err = 0;
1665 	int			notify = 0;
1666 	list_t			*listp;
1667 	ldi_ev_callback_impl_t	*lecp;
1668 
1669 	if (lh == NULL)
1670 		return (EINVAL);
1671 
1672 	ASSERT(!servicing_interrupt());
1673 
1674 #ifdef	LDI_OBSOLETE_EVENT
1675 
1676 	/*
1677 	 * Any event handlers should have been unregistered by the
1678 	 * time ldi_close() is called.  If they haven't then it's a
1679 	 * bug.
1680 	 *
1681 	 * In a debug kernel we'll panic to make the problem obvious.
1682 	 */
1683 	ASSERT(handlep->lh_events == NULL);
1684 
1685 	/*
1686 	 * On a production kernel we'll "do the right thing" (unregister
1687 	 * the event handlers) and then complain about having to do the
1688 	 * work ourselves.
1689 	 */
1690 	while ((lep = handlep->lh_events) != NULL) {
1691 		err = 1;
1692 		(void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1693 	}
1694 	if (err) {
1695 		struct ldi_ident *lip = handlep->lh_ident;
1696 		ASSERT(lip != NULL);
1697 		cmn_err(CE_NOTE, "ldi err: %s "
1698 		    "failed to unregister layered event handlers before "
1699 		    "closing devices", lip->li_modname);
1700 	}
1701 #endif
1702 
1703 	/* do a layered close on the device */
1704 	err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1705 
1706 	LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1707 
1708 	/*
1709 	 * Search the event callback list for callbacks with this
1710 	 * handle. There are 2 cases
1711 	 * 1. Called in the context of a notify. The handle consumer
1712 	 *    is releasing its hold on the device to allow a reconfiguration
1713 	 *    of the device. Simply NULL out the handle and the notify callback.
1714 	 *    The finalize callback is still available so that the consumer
1715 	 *    knows of the final disposition of the device.
1716 	 * 2. Not called in the context of notify. NULL out the handle as well
1717 	 *    as the notify and finalize callbacks. Since the consumer has
1718 	 *    closed the handle, we assume it is not interested in the
1719 	 *    notify and finalize callbacks.
1720 	 */
1721 	ldi_ev_lock();
1722 
1723 	if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1724 		notify = 1;
1725 	listp = &ldi_ev_callback_list.le_head;
1726 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1727 		if (lecp->lec_lhp != handlep)
1728 			continue;
1729 		lecp->lec_lhp = NULL;
1730 		lecp->lec_notify = NULL;
1731 		LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1732 		if (!notify) {
1733 			LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1734 			lecp->lec_finalize = NULL;
1735 		}
1736 	}
1737 
1738 	if (notify)
1739 		handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1740 	ldi_ev_unlock();
1741 
1742 	/*
1743 	 * Free the handle even if the device close failed.  why?
1744 	 *
1745 	 * If the device close failed we can't really make assumptions
1746 	 * about the devices state so we shouldn't allow access to the
1747 	 * device via this handle any more.  If the device consumer wants
1748 	 * to access the device again they should open it again.
1749 	 *
1750 	 * This is the same way file/device close failures are handled
1751 	 * in other places like spec_close() and closeandsetf().
1752 	 */
1753 	handle_release(handlep);
1754 	return (err);
1755 }
1756 
1757 int
1758 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1759 {
1760 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1761 	vnode_t			*vp;
1762 	dev_t			dev;
1763 	int			ret;
1764 
1765 	if (lh == NULL)
1766 		return (EINVAL);
1767 
1768 	vp = handlep->lh_vp;
1769 	dev = vp->v_rdev;
1770 	if (handlep->lh_type & LH_CBDEV) {
1771 		ret = cdev_read(dev, uiop, credp);
1772 	} else if (handlep->lh_type & LH_STREAM) {
1773 		ret = strread(vp, uiop, credp);
1774 	} else {
1775 		return (ENOTSUP);
1776 	}
1777 	return (ret);
1778 }
1779 
1780 int
1781 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1782 {
1783 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1784 	vnode_t			*vp;
1785 	dev_t			dev;
1786 	int			ret;
1787 
1788 	if (lh == NULL)
1789 		return (EINVAL);
1790 
1791 	vp = handlep->lh_vp;
1792 	dev = vp->v_rdev;
1793 	if (handlep->lh_type & LH_CBDEV) {
1794 		ret = cdev_write(dev, uiop, credp);
1795 	} else if (handlep->lh_type & LH_STREAM) {
1796 		ret = strwrite(vp, uiop, credp);
1797 	} else {
1798 		return (ENOTSUP);
1799 	}
1800 	return (ret);
1801 }
1802 
1803 int
1804 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1805 {
1806 	int			otyp;
1807 	uint_t			value;
1808 	int64_t			drv_prop64;
1809 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1810 	uint_t			blksize;
1811 	int			blkshift;
1812 
1813 
1814 	if ((lh == NULL) || (sizep == NULL))
1815 		return (DDI_FAILURE);
1816 
1817 	if (handlep->lh_type & LH_STREAM)
1818 		return (DDI_FAILURE);
1819 
1820 	/*
1821 	 * Determine device type (char or block).
1822 	 * Character devices support Size/size
1823 	 * property value. Block devices may support
1824 	 * Nblocks/nblocks or Size/size property value.
1825 	 */
1826 	if ((ldi_get_otyp(lh, &otyp)) != 0)
1827 		return (DDI_FAILURE);
1828 
1829 	if (otyp == OTYP_BLK) {
1830 		if (ldi_prop_exists(lh,
1831 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1832 
1833 			drv_prop64 = ldi_prop_get_int64(lh,
1834 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1835 			    "Nblocks", 0);
1836 			blksize = ldi_prop_get_int(lh,
1837 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1838 			    "blksize", DEV_BSIZE);
1839 			if (blksize == DEV_BSIZE)
1840 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1841 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1842 				    "device-blksize", DEV_BSIZE);
1843 
1844 			/* blksize must be a power of two */
1845 			ASSERT(BIT_ONLYONESET(blksize));
1846 			blkshift = highbit(blksize) - 1;
1847 
1848 			/*
1849 			 * We don't support Nblocks values that don't have
1850 			 * an accurate uint64_t byte count representation.
1851 			 */
1852 			if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1853 				return (DDI_FAILURE);
1854 
1855 			*sizep = (uint64_t)
1856 			    (((u_offset_t)drv_prop64) << blkshift);
1857 			return (DDI_SUCCESS);
1858 		}
1859 
1860 		if (ldi_prop_exists(lh,
1861 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1862 
1863 			value = ldi_prop_get_int(lh,
1864 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1865 			    "nblocks", 0);
1866 			blksize = ldi_prop_get_int(lh,
1867 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1868 			    "blksize", DEV_BSIZE);
1869 			if (blksize == DEV_BSIZE)
1870 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1871 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1872 				    "device-blksize", DEV_BSIZE);
1873 
1874 			/* blksize must be a power of two */
1875 			ASSERT(BIT_ONLYONESET(blksize));
1876 			blkshift = highbit(blksize) - 1;
1877 
1878 			/*
1879 			 * We don't support nblocks values that don't have an
1880 			 * accurate uint64_t byte count representation.
1881 			 */
1882 			if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1883 				return (DDI_FAILURE);
1884 
1885 			*sizep = (uint64_t)
1886 			    (((u_offset_t)value) << blkshift);
1887 			return (DDI_SUCCESS);
1888 		}
1889 	}
1890 
1891 	if (ldi_prop_exists(lh,
1892 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1893 
1894 		drv_prop64 = ldi_prop_get_int64(lh,
1895 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1896 		*sizep = (uint64_t)drv_prop64;
1897 		return (DDI_SUCCESS);
1898 	}
1899 
1900 	if (ldi_prop_exists(lh,
1901 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1902 
1903 		value = ldi_prop_get_int(lh,
1904 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1905 		*sizep = (uint64_t)value;
1906 		return (DDI_SUCCESS);
1907 	}
1908 
1909 	/* unable to determine device size */
1910 	return (DDI_FAILURE);
1911 }
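
/*
 * Illustrative sketch (not part of this file): how a hypothetical layered
 * consumer might open a target device by path and query its size with
 * ldi_get_size().  The device path, helper name, and error handling are
 * assumptions.
 */
#if 0
static int
example_get_target_size(dev_info_t *dip, uint64_t *sizep)
{
	ldi_ident_t	li;
	ldi_handle_t	lh;
	int		err;

	/* derive an LDI identifier from our own devinfo node */
	if (ldi_ident_from_dip(dip, &li) != 0)
		return (EINVAL);

	/* open the underlying device (hypothetical path) */
	err = ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD, kcred, &lh, li);
	if (err != 0) {
		ldi_ident_release(li);
		return (err);
	}

	/* ldi_get_size() returns DDI_SUCCESS/DDI_FAILURE, not an errno */
	if (ldi_get_size(lh, sizep) != DDI_SUCCESS)
		err = EIO;

	(void) ldi_close(lh, FREAD, kcred);
	ldi_ident_release(li);
	return (err);
}
#endif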
1912 
1913 int
1914 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1915 	cred_t *cr, int *rvalp)
1916 {
1917 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1918 	vnode_t			*vp;
1919 	dev_t			dev;
1920 	int			ret, copymode, unused;
1921 
1922 	if (lh == NULL)
1923 		return (EINVAL);
1924 
1925 	/*
1926 	 * if the data pointed to by arg is located in the kernel, then
1927 	 * make sure the FNATIVE flag is set.
1928 	 */
1929 	if (mode & FKIOCTL)
1930 		mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1931 
1932 	/*
1933 	 * Some drivers assume that rvalp will always be non-NULL, so in
1934 	 * an attempt to avoid panics if the caller passed in a NULL
1935 	 * value, update rvalp to point to a temporary variable.
1936 	 */
1937 	if (rvalp == NULL)
1938 		rvalp = &unused;
1939 	vp = handlep->lh_vp;
1940 	dev = vp->v_rdev;
1941 	if (handlep->lh_type & LH_CBDEV) {
1942 		ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1943 	} else if (handlep->lh_type & LH_STREAM) {
1944 		copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1945 
1946 		/*
1947 		 * if we get an I_PLINK from within the kernel, the
1948 		 * arg is a layered handle pointer instead of
1949 		 * a file descriptor, so we translate this ioctl
1950 		 * into a private one that can handle this.
1951 		 */
1952 		if ((mode & FKIOCTL) && (cmd == I_PLINK))
1953 			cmd = _I_PLINK_LH;
1954 
1955 		ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1956 	} else {
1957 		return (ENOTSUP);
1958 	}
1959 
1960 	return (ret);
1961 }
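
/*
 * Illustrative sketch (not part of this file): issuing an ioctl on a layered
 * handle with a kernel buffer.  FKIOCTL tells the target driver that "arg"
 * points at kernel memory; ldi_ioctl() above then forces FNATIVE as well.
 * The DKIOCGMEDIAINFO request and <sys/dkio.h> usage are assumptions about
 * the underlying device.
 */
#if 0
static int
example_media_info(ldi_handle_t lh, struct dk_minfo *dkmp)
{
	/* rvalp may be NULL; ldi_ioctl() substitutes a scratch variable */
	return (ldi_ioctl(lh, DKIOCGMEDIAINFO, (intptr_t)dkmp,
	    FKIOCTL | FREAD, kcred, NULL));
}
#endif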
1962 
1963 int
1964 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1965     struct pollhead **phpp)
1966 {
1967 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1968 	vnode_t			*vp;
1969 	dev_t			dev;
1970 	int			ret;
1971 
1972 	if (lh == NULL)
1973 		return (EINVAL);
1974 
1975 	vp = handlep->lh_vp;
1976 	dev = vp->v_rdev;
1977 	if (handlep->lh_type & LH_CBDEV) {
1978 		ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1979 	} else if (handlep->lh_type & LH_STREAM) {
1980 		ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1981 	} else {
1982 		return (ENOTSUP);
1983 	}
1984 
1985 	return (ret);
1986 }
1987 
1988 int
1989 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
1990 	int flags, char *name, caddr_t valuep, int *length)
1991 {
1992 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1993 	dev_t			dev;
1994 	dev_info_t		*dip;
1995 	int			ret;
1996 	struct snode		*csp;
1997 
1998 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
1999 		return (DDI_PROP_INVAL_ARG);
2000 
2001 	if ((prop_op != PROP_LEN) && (valuep == NULL))
2002 		return (DDI_PROP_INVAL_ARG);
2003 
2004 	if (length == NULL)
2005 		return (DDI_PROP_INVAL_ARG);
2006 
2007 	/*
2008 	 * try to find the associated dip;
2009 	 * this places a hold on the driver
2010 	 */
2011 	dev = handlep->lh_vp->v_rdev;
2012 
2013 	csp = VTOCS(handlep->lh_vp);
2014 	mutex_enter(&csp->s_lock);
2015 	if ((dip = csp->s_dip) != NULL)
2016 		e_ddi_hold_devi(dip);
2017 	mutex_exit(&csp->s_lock);
2018 	if (dip == NULL)
2019 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2020 
2021 	if (dip == NULL)
2022 		return (DDI_PROP_NOT_FOUND);
2023 
2024 	ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2025 	ddi_release_devi(dip);
2026 
2027 	return (ret);
2028 }
2029 
2030 int
2031 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2032 {
2033 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2034 	dev_t			dev;
2035 
2036 	if ((lh == NULL) || (bp == NULL))
2037 		return (EINVAL);
2038 
2039 	/* this entry point is only supported for cb devices */
2040 	dev = handlep->lh_vp->v_rdev;
2041 	if (!(handlep->lh_type & LH_CBDEV))
2042 		return (ENOTSUP);
2043 
2044 	bp->b_edev = dev;
2045 	bp->b_dev = cmpdev(dev);
2046 	return (bdev_strategy(bp));
2047 }
2048 
2049 int
2050 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2051 {
2052 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2053 	dev_t			dev;
2054 
2055 	if (lh == NULL)
2056 		return (EINVAL);
2057 
2058 	/* this entry point is only supported for cb devices */
2059 	dev = handlep->lh_vp->v_rdev;
2060 	if (!(handlep->lh_type & LH_CBDEV))
2061 		return (ENOTSUP);
2062 
2063 	return (bdev_dump(dev, addr, blkno, nblk));
2064 }
2065 
2066 int
2067 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2068     size_t len, size_t *maplen, uint_t model)
2069 {
2070 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2071 	dev_t			dev;
2072 
2073 	if (lh == NULL)
2074 		return (EINVAL);
2075 
2076 	/* this entry point is only supported for cb devices */
2077 	dev = handlep->lh_vp->v_rdev;
2078 	if (!(handlep->lh_type & LH_CBDEV))
2079 		return (ENOTSUP);
2080 
2081 	return (cdev_devmap(dev, dhp, off, len, maplen, model));
2082 }
2083 
2084 int
2085 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2086 {
2087 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2088 	dev_t			dev;
2089 	struct cb_ops		*cb;
2090 
2091 	if (lh == NULL)
2092 		return (EINVAL);
2093 
2094 	/* this entry point is only supported for cb devices */
2095 	if (!(handlep->lh_type & LH_CBDEV))
2096 		return (ENOTSUP);
2097 
2098 	/*
2099 	 * Kaio is only supported on block devices.
2100 	 */
2101 	dev = handlep->lh_vp->v_rdev;
2102 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2103 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2104 		return (ENOTSUP);
2105 
2106 	if (cb->cb_aread == NULL)
2107 		return (ENOTSUP);
2108 
2109 	return (cb->cb_aread(dev, aio_reqp, cr));
2110 }
2111 
2112 int
2113 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2114 {
2115 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2116 	struct cb_ops		*cb;
2117 	dev_t			dev;
2118 
2119 	if (lh == NULL)
2120 		return (EINVAL);
2121 
2122 	/* this entry point is only supported for cb devices */
2123 	if (!(handlep->lh_type & LH_CBDEV))
2124 		return (ENOTSUP);
2125 
2126 	/*
2127 	 * Kaio is only supported on block devices.
2128 	 */
2129 	dev = handlep->lh_vp->v_rdev;
2130 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2131 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2132 		return (ENOTSUP);
2133 
2134 	if (cb->cb_awrite == NULL)
2135 		return (ENOTSUP);
2136 
2137 	return (cb->cb_awrite(dev, aio_reqp, cr));
2138 }
2139 
2140 int
2141 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2142 {
2143 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2144 	int			ret;
2145 
2146 	if ((lh == NULL) || (smp == NULL))
2147 		return (EINVAL);
2148 
2149 	if (!(handlep->lh_type & LH_STREAM)) {
2150 		freemsg(smp);
2151 		return (ENOTSUP);
2152 	}
2153 
2154 	/*
2155 	 * If we don't have db_credp, set it. Note that we cannot be called
2156 	 * from interrupt context.
2157 	 */
2158 	if (msg_getcred(smp, NULL) == NULL)
2159 		mblk_setcred(smp, CRED(), curproc->p_pid);
2160 
2161 	/* Send message while honoring flow control */
2162 	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2163 	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2164 
2165 	return (ret);
2166 }
2167 
2168 int
2169 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2170 {
2171 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2172 	clock_t			timout; /* milliseconds */
2173 	uchar_t			pri;
2174 	rval_t			rval;
2175 	int			ret, pflag;
2176 
2177 
2178 	if (lh == NULL)
2179 		return (EINVAL);
2180 
2181 	if (!(handlep->lh_type & LH_STREAM))
2182 		return (ENOTSUP);
2183 
2184 	/* Convert the timestruc_t timeout to milliseconds */
2185 	if (timeo != NULL) {
2186 		timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2187 		if (timout > INT_MAX)
2188 			return (EINVAL);
2189 	} else
2190 		timout = -1;
2191 
2192 	/* Wait for timeout milliseconds for a message */
2193 	pflag = MSG_ANY;
2194 	pri = 0;
2195 	*rmp = NULL;
2196 	ret = kstrgetmsg(handlep->lh_vp,
2197 	    rmp, NULL, &pri, &pflag, timout, &rval);
2198 	return (ret);
2199 }
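
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer of a
 * STREAMS device sending a message with ldi_putmsg() and waiting up to one
 * second for a reply with ldi_getmsg().  The request buffer, sizes, and
 * helper name are assumptions.
 */
#if 0
static int
example_stream_roundtrip(ldi_handle_t lh, void *req, size_t reqlen,
    mblk_t **replyp)
{
	mblk_t		*mp;
	timestruc_t	to;
	int		err;

	if ((mp = allocb(reqlen, BPRI_HI)) == NULL)
		return (ENOMEM);
	bcopy(req, mp->b_wptr, reqlen);
	mp->b_wptr += reqlen;

	/* on return the framework owns the message; do not free it here */
	if ((err = ldi_putmsg(lh, mp)) != 0)
		return (err);

	to.tv_sec = 1;		/* one second timeout */
	to.tv_nsec = 0;
	return (ldi_getmsg(lh, replyp, &to));
}
#endif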
2200 
2201 int
2202 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2203 {
2204 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2205 
2206 	if ((lh == NULL) || (devp == NULL))
2207 		return (EINVAL);
2208 
2209 	*devp = handlep->lh_vp->v_rdev;
2210 	return (0);
2211 }
2212 
2213 int
2214 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2215 {
2216 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2217 
2218 	if ((lh == NULL) || (otyp == NULL))
2219 		return (EINVAL);
2220 
2221 	*otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2222 	return (0);
2223 }
2224 
2225 int
2226 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2227 {
2228 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2229 	int			ret;
2230 	dev_t			dev;
2231 
2232 	if ((lh == NULL) || (devid == NULL))
2233 		return (EINVAL);
2234 
2235 	dev = handlep->lh_vp->v_rdev;
2236 
2237 	ret = ddi_lyr_get_devid(dev, devid);
2238 	if (ret != DDI_SUCCESS)
2239 		return (ENOTSUP);
2240 
2241 	return (0);
2242 }
2243 
2244 int
2245 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2246 {
2247 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2248 	int			ret, otyp;
2249 	dev_t			dev;
2250 
2251 	if ((lh == NULL) || (minor_name == NULL))
2252 		return (EINVAL);
2253 
2254 	dev = handlep->lh_vp->v_rdev;
2255 	otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2256 
2257 	ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2258 	if (ret != DDI_SUCCESS)
2259 		return (ENOTSUP);
2260 
2261 	return (0);
2262 }
2263 
2264 int
2265 ldi_prop_lookup_int_array(ldi_handle_t lh,
2266     uint_t flags, char *name, int **data, uint_t *nelements)
2267 {
2268 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2269 	dev_info_t		*dip;
2270 	dev_t			dev;
2271 	int			res;
2272 	struct snode		*csp;
2273 
2274 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2275 		return (DDI_PROP_INVAL_ARG);
2276 
2277 	dev = handlep->lh_vp->v_rdev;
2278 
2279 	csp = VTOCS(handlep->lh_vp);
2280 	mutex_enter(&csp->s_lock);
2281 	if ((dip = csp->s_dip) != NULL)
2282 		e_ddi_hold_devi(dip);
2283 	mutex_exit(&csp->s_lock);
2284 	if (dip == NULL)
2285 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2286 
2287 	if (dip == NULL) {
2288 		flags |= DDI_UNBND_DLPI2;
2289 	} else if (flags & LDI_DEV_T_ANY) {
2290 		flags &= ~LDI_DEV_T_ANY;
2291 		dev = DDI_DEV_T_ANY;
2292 	}
2293 
2294 	if (dip != NULL) {
2295 		int *prop_val, prop_len;
2296 
2297 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2298 		    (caddr_t *)&prop_val, &prop_len, sizeof (int));
2299 
2300 		/* if we got it then return it */
2301 		if (res == DDI_PROP_SUCCESS) {
2302 			*nelements = prop_len / sizeof (int);
2303 			*data = prop_val;
2304 
2305 			ddi_release_devi(dip);
2306 			return (res);
2307 		}
2308 	}
2309 
2310 	/* call the normal property interfaces */
2311 	res = ddi_prop_lookup_int_array(dev, dip, flags,
2312 	    name, data, nelements);
2313 
2314 	if (dip != NULL)
2315 		ddi_release_devi(dip);
2316 
2317 	return (res);
2318 }
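
/*
 * Illustrative sketch (not part of this file): looking up an integer-array
 * property on a layered handle and releasing it.  The property name
 * "example-prop" and helper name are assumptions.
 */
#if 0
static void
example_prop_lookup(ldi_handle_t lh)
{
	int	*vals;
	uint_t	nvals, i;

	if (ldi_prop_lookup_int_array(lh, LDI_DEV_T_ANY | DDI_PROP_DONTPASS,
	    "example-prop", &vals, &nvals) == DDI_PROP_SUCCESS) {
		for (i = 0; i < nvals; i++)
			cmn_err(CE_CONT, "example-prop[%u] = %d\n", i, vals[i]);
		/* property data must be freed via ddi_prop_free() */
		ddi_prop_free(vals);
	}
}
#endif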
2319 
2320 int
2321 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2322     uint_t flags, char *name, int64_t **data, uint_t *nelements)
2323 {
2324 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2325 	dev_info_t		*dip;
2326 	dev_t			dev;
2327 	int			res;
2328 	struct snode		*csp;
2329 
2330 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2331 		return (DDI_PROP_INVAL_ARG);
2332 
2333 	dev = handlep->lh_vp->v_rdev;
2334 
2335 	csp = VTOCS(handlep->lh_vp);
2336 	mutex_enter(&csp->s_lock);
2337 	if ((dip = csp->s_dip) != NULL)
2338 		e_ddi_hold_devi(dip);
2339 	mutex_exit(&csp->s_lock);
2340 	if (dip == NULL)
2341 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2342 
2343 	if (dip == NULL) {
2344 		flags |= DDI_UNBND_DLPI2;
2345 	} else if (flags & LDI_DEV_T_ANY) {
2346 		flags &= ~LDI_DEV_T_ANY;
2347 		dev = DDI_DEV_T_ANY;
2348 	}
2349 
2350 	if (dip != NULL) {
2351 		int64_t	*prop_val;
2352 		int	prop_len;
2353 
2354 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2355 		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2356 
2357 		/* if we got it then return it */
2358 		if (res == DDI_PROP_SUCCESS) {
2359 			*nelements = prop_len / sizeof (int64_t);
2360 			*data = prop_val;
2361 
2362 			ddi_release_devi(dip);
2363 			return (res);
2364 		}
2365 	}
2366 
2367 	/* call the normal property interfaces */
2368 	res = ddi_prop_lookup_int64_array(dev, dip, flags,
2369 	    name, data, nelements);
2370 
2371 	if (dip != NULL)
2372 		ddi_release_devi(dip);
2373 
2374 	return (res);
2375 }
2376 
2377 int
2378 ldi_prop_lookup_string_array(ldi_handle_t lh,
2379     uint_t flags, char *name, char ***data, uint_t *nelements)
2380 {
2381 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2382 	dev_info_t		*dip;
2383 	dev_t			dev;
2384 	int			res;
2385 	struct snode		*csp;
2386 
2387 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2388 		return (DDI_PROP_INVAL_ARG);
2389 
2390 	dev = handlep->lh_vp->v_rdev;
2391 
2392 	csp = VTOCS(handlep->lh_vp);
2393 	mutex_enter(&csp->s_lock);
2394 	if ((dip = csp->s_dip) != NULL)
2395 		e_ddi_hold_devi(dip);
2396 	mutex_exit(&csp->s_lock);
2397 	if (dip == NULL)
2398 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2399 
2400 	if (dip == NULL) {
2401 		flags |= DDI_UNBND_DLPI2;
2402 	} else if (flags & LDI_DEV_T_ANY) {
2403 		flags &= ~LDI_DEV_T_ANY;
2404 		dev = DDI_DEV_T_ANY;
2405 	}
2406 
2407 	if (dip != NULL) {
2408 		char	*prop_val;
2409 		int	prop_len;
2410 
2411 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2412 		    (caddr_t *)&prop_val, &prop_len, 0);
2413 
2414 		/* if we got it then return it */
2415 		if (res == DDI_PROP_SUCCESS) {
2416 			char	**str_array;
2417 			int	nelem;
2418 
2419 			/*
2420 			 * pack the returned string array into the format
2421 			 * our callers expect
2422 			 */
2423 			if (i_pack_string_array(prop_val, prop_len,
2424 			    &str_array, &nelem) == 0) {
2425 
2426 				*data = str_array;
2427 				*nelements = nelem;
2428 
2429 				ddi_prop_free(prop_val);
2430 				ddi_release_devi(dip);
2431 				return (res);
2432 			}
2433 
2434 			/*
2435 			 * the format of the returned property must have
2436 			 * been bad so throw it out
2437 			 */
2438 			ddi_prop_free(prop_val);
2439 		}
2440 	}
2441 
2442 	/* call the normal property interfaces */
2443 	res = ddi_prop_lookup_string_array(dev, dip, flags,
2444 	    name, data, nelements);
2445 
2446 	if (dip != NULL)
2447 		ddi_release_devi(dip);
2448 
2449 	return (res);
2450 }
2451 
2452 int
2453 ldi_prop_lookup_string(ldi_handle_t lh,
2454     uint_t flags, char *name, char **data)
2455 {
2456 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2457 	dev_info_t		*dip;
2458 	dev_t			dev;
2459 	int			res;
2460 	struct snode		*csp;
2461 
2462 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2463 		return (DDI_PROP_INVAL_ARG);
2464 
2465 	dev = handlep->lh_vp->v_rdev;
2466 
2467 	csp = VTOCS(handlep->lh_vp);
2468 	mutex_enter(&csp->s_lock);
2469 	if ((dip = csp->s_dip) != NULL)
2470 		e_ddi_hold_devi(dip);
2471 	mutex_exit(&csp->s_lock);
2472 	if (dip == NULL)
2473 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2474 
2475 	if (dip == NULL) {
2476 		flags |= DDI_UNBND_DLPI2;
2477 	} else if (flags & LDI_DEV_T_ANY) {
2478 		flags &= ~LDI_DEV_T_ANY;
2479 		dev = DDI_DEV_T_ANY;
2480 	}
2481 
2482 	if (dip != NULL) {
2483 		char	*prop_val;
2484 		int	prop_len;
2485 
2486 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2487 		    (caddr_t *)&prop_val, &prop_len, 0);
2488 
2489 		/* if we got it then return it */
2490 		if (res == DDI_PROP_SUCCESS) {
2491 			/*
2492 			 * sanity check the value returned.
2493 			 */
2494 			if (i_check_string(prop_val, prop_len)) {
2495 				ddi_prop_free(prop_val);
2496 			} else {
2497 				*data = prop_val;
2498 				ddi_release_devi(dip);
2499 				return (res);
2500 			}
2501 		}
2502 	}
2503 
2504 	/* call the normal property interfaces */
2505 	res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2506 
2507 	if (dip != NULL)
2508 		ddi_release_devi(dip);
2509 
2510 #ifdef DEBUG
2511 	if (res == DDI_PROP_SUCCESS) {
2512 		/*
2513 		 * keep ourselves honest: make sure the framework
2514 		 * returns strings in the same format that we
2515 		 * demand from drivers.
2516 		 */
2517 		struct prop_driver_data	*pdd;
2518 		int			pdd_prop_size;
2519 
2520 		pdd = ((struct prop_driver_data *)(*data)) - 1;
2521 		pdd_prop_size = pdd->pdd_size -
2522 		    sizeof (struct prop_driver_data);
2523 		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2524 	}
2525 #endif /* DEBUG */
2526 
2527 	return (res);
2528 }
2529 
2530 int
2531 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2532     uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2533 {
2534 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2535 	dev_info_t		*dip;
2536 	dev_t			dev;
2537 	int			res;
2538 	struct snode		*csp;
2539 
2540 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2541 		return (DDI_PROP_INVAL_ARG);
2542 
2543 	dev = handlep->lh_vp->v_rdev;
2544 
2545 	csp = VTOCS(handlep->lh_vp);
2546 	mutex_enter(&csp->s_lock);
2547 	if ((dip = csp->s_dip) != NULL)
2548 		e_ddi_hold_devi(dip);
2549 	mutex_exit(&csp->s_lock);
2550 	if (dip == NULL)
2551 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2552 
2553 	if (dip == NULL) {
2554 		flags |= DDI_UNBND_DLPI2;
2555 	} else if (flags & LDI_DEV_T_ANY) {
2556 		flags &= ~LDI_DEV_T_ANY;
2557 		dev = DDI_DEV_T_ANY;
2558 	}
2559 
2560 	if (dip != NULL) {
2561 		uchar_t	*prop_val;
2562 		int	prop_len;
2563 
2564 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2565 		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2566 
2567 		/* if we got it then return it */
2568 		if (res == DDI_PROP_SUCCESS) {
2569 			*nelements = prop_len / sizeof (uchar_t);
2570 			*data = prop_val;
2571 
2572 			ddi_release_devi(dip);
2573 			return (res);
2574 		}
2575 	}
2576 
2577 	/* call the normal property interfaces */
2578 	res = ddi_prop_lookup_byte_array(dev, dip, flags,
2579 	    name, data, nelements);
2580 
2581 	if (dip != NULL)
2582 		ddi_release_devi(dip);
2583 
2584 	return (res);
2585 }
2586 
2587 int
2588 ldi_prop_get_int(ldi_handle_t lh,
2589     uint_t flags, char *name, int defvalue)
2590 {
2591 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2592 	dev_info_t		*dip;
2593 	dev_t			dev;
2594 	int			res;
2595 	struct snode		*csp;
2596 
2597 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2598 		return (defvalue);
2599 
2600 	dev = handlep->lh_vp->v_rdev;
2601 
2602 	csp = VTOCS(handlep->lh_vp);
2603 	mutex_enter(&csp->s_lock);
2604 	if ((dip = csp->s_dip) != NULL)
2605 		e_ddi_hold_devi(dip);
2606 	mutex_exit(&csp->s_lock);
2607 	if (dip == NULL)
2608 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2609 
2610 	if (dip == NULL) {
2611 		flags |= DDI_UNBND_DLPI2;
2612 	} else if (flags & LDI_DEV_T_ANY) {
2613 		flags &= ~LDI_DEV_T_ANY;
2614 		dev = DDI_DEV_T_ANY;
2615 	}
2616 
2617 	if (dip != NULL) {
2618 		int	prop_val;
2619 		int	prop_len;
2620 
2621 		/*
2622 		 * first call the driver's prop_op interface to allow
2623 		 * it to override default property values.
2624 		 */
2625 		prop_len = sizeof (int);
2626 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2627 		    flags | DDI_PROP_DYNAMIC, name,
2628 		    (caddr_t)&prop_val, &prop_len);
2629 
2630 		/* if we got it then return it */
2631 		if ((res == DDI_PROP_SUCCESS) &&
2632 		    (prop_len == sizeof (int))) {
2633 			res = prop_val;
2634 			ddi_release_devi(dip);
2635 			return (res);
2636 		}
2637 	}
2638 
2639 	/* call the normal property interfaces */
2640 	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2641 
2642 	if (dip != NULL)
2643 		ddi_release_devi(dip);
2644 
2645 	return (res);
2646 }
2647 
2648 int64_t
2649 ldi_prop_get_int64(ldi_handle_t lh,
2650     uint_t flags, char *name, int64_t defvalue)
2651 {
2652 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2653 	dev_info_t		*dip;
2654 	dev_t			dev;
2655 	int64_t			res;
2656 	struct snode		*csp;
2657 
2658 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2659 		return (defvalue);
2660 
2661 	dev = handlep->lh_vp->v_rdev;
2662 
2663 	csp = VTOCS(handlep->lh_vp);
2664 	mutex_enter(&csp->s_lock);
2665 	if ((dip = csp->s_dip) != NULL)
2666 		e_ddi_hold_devi(dip);
2667 	mutex_exit(&csp->s_lock);
2668 	if (dip == NULL)
2669 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2670 
2671 	if (dip == NULL) {
2672 		flags |= DDI_UNBND_DLPI2;
2673 	} else if (flags & LDI_DEV_T_ANY) {
2674 		flags &= ~LDI_DEV_T_ANY;
2675 		dev = DDI_DEV_T_ANY;
2676 	}
2677 
2678 	if (dip != NULL) {
2679 		int64_t	prop_val;
2680 		int	prop_len;
2681 
2682 		/*
2683 		 * first call the driver's prop_op interface to allow
2684 		 * it to override default property values.
2685 		 */
2686 		prop_len = sizeof (int64_t);
2687 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2688 		    flags | DDI_PROP_DYNAMIC, name,
2689 		    (caddr_t)&prop_val, &prop_len);
2690 
2691 		/* if we got it then return it */
2692 		if ((res == DDI_PROP_SUCCESS) &&
2693 		    (prop_len == sizeof (int64_t))) {
2694 			res = prop_val;
2695 			ddi_release_devi(dip);
2696 			return (res);
2697 		}
2698 	}
2699 
2700 	/* call the normal property interfaces */
2701 	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2702 
2703 	if (dip != NULL)
2704 		ddi_release_devi(dip);
2705 
2706 	return (res);
2707 }
2708 
2709 int
2710 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2711 {
2712 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2713 	dev_info_t		*dip;
2714 	dev_t			dev;
2715 	int			res, prop_len;
2716 	struct snode		*csp;
2717 
2718 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2719 		return (0);
2720 
2721 	dev = handlep->lh_vp->v_rdev;
2722 
2723 	csp = VTOCS(handlep->lh_vp);
2724 	mutex_enter(&csp->s_lock);
2725 	if ((dip = csp->s_dip) != NULL)
2726 		e_ddi_hold_devi(dip);
2727 	mutex_exit(&csp->s_lock);
2728 	if (dip == NULL)
2729 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2730 
2731 	/* if NULL dip, prop does NOT exist */
2732 	if (dip == NULL)
2733 		return (0);
2734 
2735 	if (flags & LDI_DEV_T_ANY) {
2736 		flags &= ~LDI_DEV_T_ANY;
2737 		dev = DDI_DEV_T_ANY;
2738 	}
2739 
2740 	/*
2741 	 * first call the driver's prop_op interface to allow
2742 	 * it to override default property values.
2743 	 */
2744 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
2745 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2746 
2747 	if (res == DDI_PROP_SUCCESS) {
2748 		ddi_release_devi(dip);
2749 		return (1);
2750 	}
2751 
2752 	/* call the normal property interfaces */
2753 	res = ddi_prop_exists(dev, dip, flags, name);
2754 
2755 	ddi_release_devi(dip);
2756 	return (res);
2757 }
2758 
2759 #ifdef	LDI_OBSOLETE_EVENT
2760 
2761 int
2762 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2763 {
2764 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2765 	dev_info_t		*dip;
2766 	dev_t			dev;
2767 	int			res;
2768 	struct snode		*csp;
2769 
2770 	if ((lh == NULL) || (name == NULL) ||
2771 	    (strlen(name) == 0) || (ecp == NULL)) {
2772 		return (DDI_FAILURE);
2773 	}
2774 
2775 	ASSERT(!servicing_interrupt());
2776 
2777 	dev = handlep->lh_vp->v_rdev;
2778 
2779 	csp = VTOCS(handlep->lh_vp);
2780 	mutex_enter(&csp->s_lock);
2781 	if ((dip = csp->s_dip) != NULL)
2782 		e_ddi_hold_devi(dip);
2783 	mutex_exit(&csp->s_lock);
2784 	if (dip == NULL)
2785 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2786 
2787 	if (dip == NULL)
2788 		return (DDI_FAILURE);
2789 
2790 	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2791 	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2792 	    name, (void *)dip, (void *)ecp));
2793 
2794 	res = ddi_get_eventcookie(dip, name, ecp);
2795 
2796 	ddi_release_devi(dip);
2797 	return (res);
2798 }
2799 
2800 int
2801 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2802     void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2803     void *arg, ldi_callback_id_t *id)
2804 {
2805 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2806 	struct ldi_event	*lep;
2807 	dev_info_t		*dip;
2808 	dev_t			dev;
2809 	int			res;
2810 	struct snode		*csp;
2811 
2812 	if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2813 		return (DDI_FAILURE);
2814 
2815 	ASSERT(!servicing_interrupt());
2816 
2817 	dev = handlep->lh_vp->v_rdev;
2818 
2819 	csp = VTOCS(handlep->lh_vp);
2820 	mutex_enter(&csp->s_lock);
2821 	if ((dip = csp->s_dip) != NULL)
2822 		e_ddi_hold_devi(dip);
2823 	mutex_exit(&csp->s_lock);
2824 	if (dip == NULL)
2825 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2826 
2827 	if (dip == NULL)
2828 		return (DDI_FAILURE);
2829 
2830 	lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2831 	lep->le_lhp = handlep;
2832 	lep->le_arg = arg;
2833 	lep->le_handler = handler;
2834 
2835 	if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2836 	    (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2837 		LDI_EVENTCB((CE_WARN, "%s: unable to add "
2838 		    "event callback", "ldi_add_event_handler"));
2839 		ddi_release_devi(dip);
2840 		kmem_free(lep, sizeof (struct ldi_event));
2841 		return (res);
2842 	}
2843 
2844 	*id = (ldi_callback_id_t)lep;
2845 
2846 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2847 	    "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2848 	    (void *)dip, (void *)ec, (void *)lep, (void *)id));
2849 
2850 	handle_event_add(lep);
2851 	ddi_release_devi(dip);
2852 	return (res);
2853 }
2854 
2855 int
2856 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2857 {
2858 	ldi_event_t		*lep = (ldi_event_t *)id;
2859 	int			res;
2860 
2861 	if ((lh == NULL) || (id == NULL))
2862 		return (DDI_FAILURE);
2863 
2864 	ASSERT(!servicing_interrupt());
2865 
2866 	if ((res = ddi_remove_event_handler(lep->le_id))
2867 	    != DDI_SUCCESS) {
2868 		LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2869 		    "event callback", "ldi_remove_event_handler"));
2870 		return (res);
2871 	}
2872 
2873 	handle_event_remove(lep);
2874 	kmem_free(lep, sizeof (struct ldi_event));
2875 	return (res);
2876 }
2877 
2878 #endif
2879 
2880 /*
2881  * Here are some definitions of terms used in the following LDI events
2882  * code:
2883  *
2884  * "LDI events" AKA "native events": These are events defined by the
2885  * "new" LDI event framework. These events are serviced by the LDI event
2886  * framework itself and thus are native to it.
2887  *
2888  * "LDI contract events": These are contract events that correspond to the
2889  * LDI events. This mapping of LDI events to contract events is defined by
2890  * the ldi_ev_cookies[] array above.
2891  *
2892  * NDI events: These are events that are serviced by the NDI event subsystem.
2893  * The LDI subsystem just provides a thin wrapper around the NDI event
2894  * interfaces. These events are therefore *not* native events.
2895  */
2896 
2897 static int
2898 ldi_native_event(const char *evname)
2899 {
2900 	int i;
2901 
2902 	LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2903 
2904 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2905 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2906 			return (1);
2907 	}
2908 
2909 	return (0);
2910 }
2911 
2912 static uint_t
2913 ldi_ev_sync_event(const char *evname)
2914 {
2915 	int i;
2916 
2917 	ASSERT(ldi_native_event(evname));
2918 
2919 	LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2920 
2921 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2922 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2923 			return (ldi_ev_cookies[i].ck_sync);
2924 	}
2925 
2926 	/*
2927 	 * This should never happen until non-contract based
2928 	 * LDI events are introduced. If that happens, we will
2929 	 * use a "special" token to indicate that there are no
2930 	 * contracts corresponding to this LDI event.
2931 	 */
2932 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2933 
2934 	return (0);
2935 }
2936 
2937 static uint_t
2938 ldi_contract_event(const char *evname)
2939 {
2940 	int i;
2941 
2942 	ASSERT(ldi_native_event(evname));
2943 
2944 	LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2945 
2946 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2947 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2948 			return (ldi_ev_cookies[i].ck_ctype);
2949 	}
2950 
2951 	/*
2952 	 * This should never happen until non-contract based
2953 	 * LDI events are introduced. If that happens, we will
2954 	 * use a "special" token to indicate that there are no
2955 	 * contracts corresponding to this LDI event.
2956 	 */
2957 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2958 
2959 	return (0);
2960 }
2961 
2962 char *
2963 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2964 {
2965 	int i;
2966 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2967 
2968 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2969 		if (&ldi_ev_cookies[i] == cookie_impl) {
2970 			LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2971 			    ldi_ev_cookies[i].ck_evname));
2972 			return (ldi_ev_cookies[i].ck_evname);
2973 		}
2974 	}
2975 
2976 	/*
2977 	 * Not an LDI native event. Must be NDI event service.
2978 	 * Just return a generic string
2979 	 */
2980 	LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2981 	return (NDI_EVENT_SERVICE);
2982 }
2983 
2984 static int
2985 ldi_native_cookie(ldi_ev_cookie_t cookie)
2986 {
2987 	int i;
2988 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2989 
2990 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2991 		if (&ldi_ev_cookies[i] == cookie_impl) {
2992 			LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
2993 			return (1);
2994 		}
2995 	}
2996 
2997 	LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
2998 	return (0);
2999 }
3000 
3001 static ldi_ev_cookie_t
3002 ldi_get_native_cookie(const char *evname)
3003 {
3004 	int i;
3005 
3006 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3007 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3008 			LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3009 			return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3010 		}
3011 	}
3012 
3013 	LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3014 	return (NULL);
3015 }
3016 
3017 /*
3018  * ldi_ev_lock() needs to be recursive, since layered drivers may call
3019  * other LDI interfaces (such as ldi_close()) from within the context of
3020  * a notify callback. The notify callback is called with ldi_ev_lock()
3021  * held, and ldi_close() also grabs ldi_ev_lock(), so the lock needs
3022  * to be recursive.
3023  */
3024 static void
3025 ldi_ev_lock(void)
3026 {
3027 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3028 
3029 	mutex_enter(&ldi_ev_callback_list.le_lock);
3030 	if (ldi_ev_callback_list.le_thread == curthread) {
3031 		ASSERT(ldi_ev_callback_list.le_busy >= 1);
3032 		ldi_ev_callback_list.le_busy++;
3033 	} else {
3034 		while (ldi_ev_callback_list.le_busy)
3035 			cv_wait(&ldi_ev_callback_list.le_cv,
3036 			    &ldi_ev_callback_list.le_lock);
3037 		ASSERT(ldi_ev_callback_list.le_thread == NULL);
3038 		ldi_ev_callback_list.le_busy = 1;
3039 		ldi_ev_callback_list.le_thread = curthread;
3040 	}
3041 	mutex_exit(&ldi_ev_callback_list.le_lock);
3042 
3043 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3044 }
3045 
3046 static void
3047 ldi_ev_unlock(void)
3048 {
3049 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3050 	mutex_enter(&ldi_ev_callback_list.le_lock);
3051 	ASSERT(ldi_ev_callback_list.le_thread == curthread);
3052 	ASSERT(ldi_ev_callback_list.le_busy >= 1);
3053 
3054 	ldi_ev_callback_list.le_busy--;
3055 	if (ldi_ev_callback_list.le_busy == 0) {
3056 		ldi_ev_callback_list.le_thread = NULL;
3057 		cv_signal(&ldi_ev_callback_list.le_cv);
3058 	}
3059 	mutex_exit(&ldi_ev_callback_list.le_lock);
3060 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3061 }
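
/*
 * Illustrative sketch (not part of this file): why ldi_ev_lock() must be
 * recursive.  A hypothetical layered driver's notify callback (invoked with
 * ldi_ev_lock() held) may close its handle; ldi_close() then re-enters
 * ldi_ev_lock() on the same thread.  The example_state_t structure and
 * open flags are assumptions.
 */
#if 0
static int
example_notify_cb(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
    void *ev_data)
{
	example_state_t *sp = arg;	/* hypothetical per-consumer state */

	/* release the device; this re-acquires ldi_ev_lock() recursively */
	(void) ldi_close(lh, FREAD | FWRITE, kcred);
	sp->es_lh = NULL;		/* remember that the handle is gone */

	return (LDI_EV_SUCCESS);	/* allow the event to proceed */
}
#endif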
3062 
3063 int
3064 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3065 {
3066 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
3067 	dev_info_t		*dip;
3068 	dev_t			dev;
3069 	int			res;
3070 	struct snode		*csp;
3071 	ddi_eventcookie_t	ddi_cookie;
3072 	ldi_ev_cookie_t		tcookie;
3073 
3074 	LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3075 	    evname ? evname : "<NULL>"));
3076 
3077 	if (lh == NULL || evname == NULL ||
3078 	    strlen(evname) == 0 || cookiep == NULL) {
3079 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3080 		return (LDI_EV_FAILURE);
3081 	}
3082 
3083 	*cookiep = NULL;
3084 
3085 	/*
3086 	 * First check if it is an LDI native event
3087 	 */
3088 	tcookie = ldi_get_native_cookie(evname);
3089 	if (tcookie) {
3090 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3091 		*cookiep = tcookie;
3092 		return (LDI_EV_SUCCESS);
3093 	}
3094 
3095 	/*
3096 	 * Not an LDI native event. Try NDI event services
3097 	 */
3098 
3099 	dev = handlep->lh_vp->v_rdev;
3100 
3101 	csp = VTOCS(handlep->lh_vp);
3102 	mutex_enter(&csp->s_lock);
3103 	if ((dip = csp->s_dip) != NULL)
3104 		e_ddi_hold_devi(dip);
3105 	mutex_exit(&csp->s_lock);
3106 	if (dip == NULL)
3107 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3108 
3109 	if (dip == NULL) {
3110 		cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3111 		    "handle: %p", (void *)handlep);
3112 		return (LDI_EV_FAILURE);
3113 	}
3114 
3115 	LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3116 	    (void *)dip, evname));
3117 
3118 	res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3119 
3120 	ddi_release_devi(dip);
3121 
3122 	if (res == DDI_SUCCESS) {
3123 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3124 		*cookiep = (ldi_ev_cookie_t)ddi_cookie;
3125 		return (LDI_EV_SUCCESS);
3126 	} else {
3127 		LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3128 		return (LDI_EV_FAILURE);
3129 	}
3130 }
3131 
3132 /*ARGSUSED*/
3133 static void
3134 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3135     void *arg, void *ev_data)
3136 {
3137 	ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3138 
3139 	ASSERT(lecp != NULL);
3140 	ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3141 	ASSERT(lecp->lec_lhp);
3142 	ASSERT(lecp->lec_notify == NULL);
3143 	ASSERT(lecp->lec_finalize);
3144 
3145 	LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3146 	    "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3147 	    (void *)lecp->lec_arg, (void *)ev_data));
3148 
3149 	lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3150 	    lecp->lec_arg, ev_data);
3151 }
3152 
3153 int
3154 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3155     ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3156 {
3157 	struct ldi_handle	*lhp = (struct ldi_handle *)lh;
3158 	ldi_ev_callback_impl_t	*lecp;
3159 	dev_t			dev;
3160 	struct snode		*csp;
3161 	dev_info_t		*dip;
3162 	int			ddi_event;
3163 
3164 	ASSERT(!servicing_interrupt());
3165 
3166 	if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3167 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3168 		return (LDI_EV_FAILURE);
3169 	}
3170 
3171 	if (callb->cb_vers != LDI_EV_CB_VERS) {
3172 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3173 		return (LDI_EV_FAILURE);
3174 	}
3175 
3176 	if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3177 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3178 		return (LDI_EV_FAILURE);
3179 	}
3180 
3181 	*id = 0;
3182 
3183 	dev = lhp->lh_vp->v_rdev;
3184 	csp = VTOCS(lhp->lh_vp);
3185 	mutex_enter(&csp->s_lock);
3186 	if ((dip = csp->s_dip) != NULL)
3187 		e_ddi_hold_devi(dip);
3188 	mutex_exit(&csp->s_lock);
3189 	if (dip == NULL)
3190 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3191 
3192 	if (dip == NULL) {
3193 		cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3194 		    "LDI handle: %p", (void *)lhp);
3195 		return (LDI_EV_FAILURE);
3196 	}
3197 
3198 	lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3199 
3200 	ddi_event = 0;
3201 	if (!ldi_native_cookie(cookie)) {
3202 		if (callb->cb_notify || callb->cb_finalize == NULL) {
3203 			/*
3204 			 * NDI event services only accept finalize
3205 			 */
3206 			cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3207 			    "Only finalize"
3208 			    " callback supported with this cookie",
3209 			    "ldi_ev_register_callbacks",
3210 			    lhp->lh_ident->li_modname);
3211 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3212 			ddi_release_devi(dip);
3213 			return (LDI_EV_FAILURE);
3214 		}
3215 
3216 		if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3217 		    i_ldi_ev_callback, (void *)lecp,
3218 		    (ddi_callback_id_t *)&lecp->lec_id)
3219 		    != DDI_SUCCESS) {
3220 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3221 			ddi_release_devi(dip);
3222 			LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3223 			    "ddi_add_event_handler failed"));
3224 			return (LDI_EV_FAILURE);
3225 		}
3226 		ddi_event = 1;
3227 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3228 		    "ddi_add_event_handler success"));
3229 	}
3230 
3231 
3232 
3233 	ldi_ev_lock();
3234 
3235 	/*
3236 	 * Add the notify/finalize callback to the LDI's list of callbacks.
3237 	 */
3238 	lecp->lec_lhp = lhp;
3239 	lecp->lec_dev = lhp->lh_vp->v_rdev;
3240 	lecp->lec_spec = VTYP_TO_STYP(lhp->lh_vp->v_type);
3241 	lecp->lec_notify = callb->cb_notify;
3242 	lecp->lec_finalize = callb->cb_finalize;
3243 	lecp->lec_arg = arg;
3244 	lecp->lec_cookie = cookie;
3245 	if (!ddi_event)
3246 		lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3247 	else
3248 		ASSERT(lecp->lec_id);
3249 	lecp->lec_dip = dip;
3250 	list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3251 
3252 	*id = (ldi_callback_id_t)lecp->lec_id;
3253 
3254 	ldi_ev_unlock();
3255 
3256 	ddi_release_devi(dip);
3257 
3258 	LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3259 	    "notify/finalize"));
3260 
3261 	return (LDI_EV_SUCCESS);
3262 }
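
/*
 * Illustrative sketch (not part of this file): a hypothetical layered driver
 * registering notify/finalize callbacks for the native LDI offline event.
 * The callback functions, helper name, and state pointer are assumptions.
 */
#if 0
static int
example_register(ldi_handle_t lh, void *state, ldi_callback_id_t *idp)
{
	ldi_ev_cookie_t		cookie;
	ldi_ev_callback_t	callb;

	if (ldi_ev_get_cookie(lh, LDI_EV_OFFLINE, &cookie) != LDI_EV_SUCCESS)
		return (LDI_EV_FAILURE);

	callb.cb_vers = LDI_EV_CB_VERS;
	callb.cb_notify = example_notify_cb;	 /* may veto the event */
	callb.cb_finalize = example_finalize_cb; /* told of the final result */

	return (ldi_ev_register_callbacks(lh, cookie, &callb, state, idp));
}
#endif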
3263 
3264 static int
3265 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3266     dev_t dev, int spec_type)
3267 {
3268 	ASSERT(lecp);
3269 	ASSERT(dip);
3270 	ASSERT(dev != DDI_DEV_T_NONE);
3271 	ASSERT(dev != NODEV);
3272 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3273 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3274 	ASSERT(lecp->lec_dip);
3275 	ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3276 	ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3277 	ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3278 	ASSERT(lecp->lec_dev != NODEV);
3279 
3280 	if (dip != lecp->lec_dip)
3281 		return (0);
3282 
3283 	if (dev != DDI_DEV_T_ANY) {
3284 		if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3285 			return (0);
3286 	}
3287 
3288 	LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3289 
3290 	return (1);
3291 }
3292 
3293 /*
3294  * LDI framework function to post a "notify" event to all layered drivers
3295  * that have registered for that event
3296  *
3297  * Returns:
3298  *		LDI_EV_SUCCESS - registered callbacks allow event
3299  *		LDI_EV_FAILURE - registered callbacks block event
3300  *		LDI_EV_NONE    - No matching LDI callbacks
3301  *
3302  * This function is *not* to be called by layered drivers. It is for I/O
3303  * framework code in Solaris, such as the I/O retire code and DR code
3304  * to call while servicing a device event such as offline or degraded.
3305  */
3306 int
3307 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3308     void *ev_data)
3309 {
3310 	ldi_ev_callback_impl_t *lecp;
3311 	list_t	*listp;
3312 	int	ret;
3313 	char	*lec_event;
3314 
3315 	ASSERT(dip);
3316 	ASSERT(dev != DDI_DEV_T_NONE);
3317 	ASSERT(dev != NODEV);
3318 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3319 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3320 	ASSERT(event);
3321 	ASSERT(ldi_native_event(event));
3322 	ASSERT(ldi_ev_sync_event(event));
3323 
3324 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3325 	    (void *)dip, event));
3326 
3327 	ret = LDI_EV_NONE;
3328 	ldi_ev_lock();
3329 	listp = &ldi_ev_callback_list.le_head;
3330 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3331 
3332 		/* Check if matching device */
3333 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3334 			continue;
3335 
3336 		if (lecp->lec_lhp == NULL) {
3337 			/*
3338 			 * Consumer has unregistered the handle and so
3339 			 * is no longer interested in notify events.
3340 			 */
3341 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3342 			    "handle, skipping"));
3343 			continue;
3344 		}
3345 
3346 		if (lecp->lec_notify == NULL) {
3347 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3348 			    "callback. skipping"));
3349 			continue;	/* not interested in notify */
3350 		}
3351 
3352 		/*
3353 		 * Check if matching event
3354 		 */
3355 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3356 		if (strcmp(event, lec_event) != 0) {
3357 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3358 			    " event {%s,%s}. skipping", event, lec_event));
3359 			continue;
3360 		}
3361 
3362 		lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3363 		if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3364 		    lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3365 			ret = LDI_EV_FAILURE;
3366 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3367 			    " FAILURE"));
3368 			break;
3369 		}
3370 
3371 		/* We have a matching callback that allows the event to occur */
3372 		ret = LDI_EV_SUCCESS;
3373 
3374 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3375 	}
3376 
3377 	if (ret != LDI_EV_FAILURE)
3378 		goto out;
3379 
3380 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3381 
3382 	/*
3383 	 * Undo notifies already sent
3384 	 */
3385 	lecp = list_prev(listp, lecp);
3386 	for (; lecp; lecp = list_prev(listp, lecp)) {
3387 
3388 		/*
3389 		 * Check if matching device
3390 		 */
3391 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3392 			continue;
3393 
3394 
3395 		if (lecp->lec_finalize == NULL) {
3396 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3397 			    "skipping"));
3398 			continue;	/* not interested in finalize */
3399 		}
3400 
3401 		/*
3402 		 * it is possible that, in response to a notify event, a
3403 		 * layered driver closed its LDI handle, so it is ok
3404 		 * to have a NULL LDI handle for finalize. The layered
3405 		 * driver is expected to maintain state in its "arg"
3406 		 * parameter to keep track of the closed device.
3407 		 */
3408 
3409 		/* Check if matching event */
3410 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3411 		if (strcmp(event, lec_event) != 0) {
3412 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3413 			    "event: %s,%s, skipping", event, lec_event));
3414 			continue;
3415 		}
3416 
3417 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3418 
3419 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3420 		    LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3421 
3422 		/*
3423 		 * If this is an LDI native event and the LDI handle was
3424 		 * closed in the context of notify, NULL out the finalize
3425 		 * callback; we have made the one finalize call allowed here.
3426 		 */
3427 		if (lecp->lec_lhp == NULL &&
3428 		    ldi_native_cookie(lecp->lec_cookie)) {
3429 			LDI_EVDBG((CE_NOTE,
3430 			    "ldi_invoke_notify(): NULL-ing finalize after "
3431 			    "calling 1 finalize following ldi_close"));
3432 			lecp->lec_finalize = NULL;
3433 		}
3434 	}
3435 
3436 out:
3437 	ldi_ev_unlock();
3438 
3439 	if (ret == LDI_EV_NONE) {
3440 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3441 		    "LDI callbacks"));
3442 	}
3443 
3444 	return (ret);
3445 }
3446 
3447 /*
3448  * Framework function to be called from a layered driver to propagate
3449  * LDI "notify" events to exported minors.
3450  *
3451  * This function is a public interface exported by the LDI framework
3452  * for use by layered drivers to propagate device events up the software
3453  * stack.
3454  */
3455 int
3456 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3457     ldi_ev_cookie_t cookie, void *ev_data)
3458 {
3459 	char		*evname = ldi_ev_get_type(cookie);
3460 	uint_t		ct_evtype;
3461 	dev_t		dev;
3462 	major_t		major;
3463 	int		retc;
3464 	int		retl;
3465 
3466 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3467 	ASSERT(dip);
3468 	ASSERT(ldi_native_cookie(cookie));
3469 
3470 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3471 	    evname, (void *)dip));
3472 
3473 	if (!ldi_ev_sync_event(evname)) {
3474 		cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3475 		    "negotiatable event", evname);
3476 		return (LDI_EV_SUCCESS);
3477 	}
3478 
3479 	major = ddi_driver_major(dip);
3480 	if (major == DDI_MAJOR_T_NONE) {
3481 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3482 		(void) ddi_pathname(dip, path);
3483 		cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3484 		    "for device %s", path);
3485 		kmem_free(path, MAXPATHLEN);
3486 		return (LDI_EV_FAILURE);
3487 	}
3488 	dev = makedevice(major, minor);
3489 
3490 	/*
3491 	 * Generate negotiation contract events on contracts (if any) associated
3492 	 * with this minor.
3493 	 */
3494 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3495 	ct_evtype = ldi_contract_event(evname);
3496 	retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3497 	if (retc == CT_NACK) {
3498 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3499 		return (LDI_EV_FAILURE);
3500 	}
3501 
3502 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3503 	retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3504 	if (retl == LDI_EV_FAILURE) {
3505 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3506 		    "returned FAILURE. Calling contract negend"));
3507 		contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3508 		return (LDI_EV_FAILURE);
3509 	}
3510 
3511 	/*
3512 	 * The very fact that we are here indicates that there is an
3513 	 * LDI callback (and hence a constraint) for the retire of the
3514 	 * HW device. So we just return success even if there are no
3515 	 * contracts or LDI callbacks against the minors layered on top
3516 	 * of the HW minors.
3517 	 */
3518 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3519 	return (LDI_EV_SUCCESS);
3520 }
3521 
3522 /*
3523  * LDI framework function to invoke "finalize" callbacks for all layered
3524  * drivers that have registered callbacks for that event.
3525  *
3526  * This function is *not* to be called by layered drivers. It is for I/O
3527  * framework code in Solaris, such as the I/O retire code and DR code
3528  * to call while servicing a device event such as offline or degraded.
3529  */
3530 void
3531 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3532     int ldi_result, void *ev_data)
3533 {
3534 	ldi_ev_callback_impl_t *lecp;
3535 	list_t	*listp;
3536 	char	*lec_event;
3537 	int	found = 0;
3538 
3539 	ASSERT(dip);
3540 	ASSERT(dev != DDI_DEV_T_NONE);
3541 	ASSERT(dev != NODEV);
3542 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3543 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3544 	ASSERT(event);
3545 	ASSERT(ldi_native_event(event));
3546 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3547 
3548 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3549 	    " event=%s", (void *)dip, ldi_result, event));
3550 
3551 	ldi_ev_lock();
3552 	listp = &ldi_ev_callback_list.le_head;
3553 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3554 
3555 		if (lecp->lec_finalize == NULL) {
3556 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3557 			    "finalize. Skipping"));
3558 			continue;	/* Not interested in finalize */
3559 		}
3560 
3561 		/*
3562 		 * Check if matching device
3563 		 */
3564 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3565 			continue;
3566 
3567 		/*
3568 		 * It is valid for the LDI handle to be NULL during finalize.
3569 		 * The layered driver may have done an LDI close in the notify
3570 		 * callback.
3571 		 */
3572 
3573 		/*
3574 		 * Check if matching event
3575 		 */
3576 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3577 		if (strcmp(event, lec_event) != 0) {
3578 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3579 			    "matching event {%s,%s}. Skipping",
3580 			    event, lec_event));
3581 			continue;
3582 		}
3583 
3584 		LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3585 
3586 		found = 1;
3587 
3588 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3589 		    ldi_result, lecp->lec_arg, ev_data);
3590 
3591 		/*
3592 		 * If this is an LDI native event and the LDI handle was
3593 		 * closed in the context of notify, NULL out the finalize
3594 		 * callback; we have made the one finalize call allowed here.
3595 		 */
3596 		if (lecp->lec_lhp == NULL &&
3597 		    ldi_native_cookie(lecp->lec_cookie)) {
3598 			LDI_EVDBG((CE_NOTE,
3599 			    "ldi_invoke_finalize(): NULLing finalize after "
3600 			    "calling 1 finalize following ldi_close"));
3601 			lecp->lec_finalize = NULL;
3602 		}
3603 	}
3604 	ldi_ev_unlock();
3605 
3606 	if (found)
3607 		return;
3608 
3609 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3610 }
3611 
3612 /*
3613  * Framework function to be called from a layered driver to propagate
3614  * LDI "finalize" events to exported minors.
3615  *
3616  * This function is a public interface exported by the LDI framework
3617  * for use by layered drivers to propagate device events up the software
3618  * stack.
3619  */
3620 void
3621 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3622     ldi_ev_cookie_t cookie, void *ev_data)
3623 {
3624 	dev_t dev;
3625 	major_t major;
3626 	char *evname;
3627 	int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3628 	    CT_EV_SUCCESS : CT_EV_FAILURE;
3629 	uint_t ct_evtype;
3630 
3631 	ASSERT(dip);
3632 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3633 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3634 	ASSERT(ldi_native_cookie(cookie));
3635 
3636 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3637 
3638 	major = ddi_driver_major(dip);
3639 	if (major == DDI_MAJOR_T_NONE) {
3640 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3641 		(void) ddi_pathname(dip, path);
3642 		cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3643 		    "for device %s", path);
3644 		kmem_free(path, MAXPATHLEN);
3645 		return;
3646 	}
3647 	dev = makedevice(major, minor);
3648 
3649 	evname = ldi_ev_get_type(cookie);
3650 
3651 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3652 	ct_evtype = ldi_contract_event(evname);
3653 	contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3654 
3655 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3656 	ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3657 }
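
/*
 * Illustrative sketch (not part of this file): a hypothetical layered driver
 * propagating events to the minors it exports.  From its own notify callback
 * it calls ldi_ev_notify(); from its finalize callback it calls
 * ldi_ev_finalize() with the result it was given.  The devinfo pointer,
 * minor number, spec type, and example_state_t structure are assumptions.
 */
#if 0
static int
example_propagate_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
    void *ev_data)
{
	example_state_t *sp = arg;

	/* ask consumers of our own minor whether the event may proceed */
	return (ldi_ev_notify(sp->es_dip, sp->es_minor, S_IFCHR,
	    cookie, ev_data));
}

static void
example_propagate_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie,
    int ldi_result, void *arg, void *ev_data)
{
	example_state_t *sp = arg;

	/* pass the final outcome up to consumers of our exported minor */
	ldi_ev_finalize(sp->es_dip, sp->es_minor, S_IFCHR, ldi_result,
	    cookie, ev_data);
}
#endif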
3658 
3659 int
3660 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3661 {
3662 	ldi_ev_callback_impl_t	*lecp;
3663 	ldi_ev_callback_impl_t	*next;
3664 	ldi_ev_callback_impl_t	*found;
3665 	list_t			*listp;
3666 
3667 	ASSERT(!servicing_interrupt());
3668 
3669 	if (id == 0) {
3670 		cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3671 		return (LDI_EV_FAILURE);
3672 	}
3673 
3674 	LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3675 	    (void *)id));
3676 
3677 	ldi_ev_lock();
3678 
3679 	listp = &ldi_ev_callback_list.le_head;
3680 	next = found = NULL;
3681 	for (lecp = list_head(listp); lecp; lecp = next) {
3682 		next = list_next(listp, lecp);
3683 		if (lecp->lec_id == id) {
3684 			ASSERT(found == NULL);
3685 			list_remove(listp, lecp);
3686 			found = lecp;
3687 		}
3688 	}
3689 	ldi_ev_unlock();
3690 
3691 	if (found == NULL) {
3692 		cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3693 		    (void *)id);
3694 		return (LDI_EV_SUCCESS);
3695 	}
3696 
3697 	if (!ldi_native_cookie(found->lec_cookie)) {
3698 		ASSERT(found->lec_notify == NULL);
3699 		if (ddi_remove_event_handler((ddi_callback_id_t)id)
3700 		    != DDI_SUCCESS) {
3701 			cmn_err(CE_WARN, "failed to remove NDI event handler "
3702 			    "for id (%p)", (void *)id);
3703 			ldi_ev_lock();
3704 			list_insert_tail(listp, found);
3705 			ldi_ev_unlock();
3706 			return (LDI_EV_FAILURE);
3707 		}
3708 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3709 		    "service removal succeeded"));
3710 	} else {
3711 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3712 		    "LDI native callbacks"));
3713 	}
3714 	kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3715 
3716 	return (LDI_EV_SUCCESS);
3717 }
3718