xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 3b756dd32021555fd913472dcc9a7a1692fb9007)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/note.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/uio.h>
35 #include <sys/cred.h>
36 #include <sys/poll.h>
37 #include <sys/mman.h>
38 #include <sys/kmem.h>
39 #include <sys/model.h>
40 #include <sys/file.h>
41 #include <sys/proc.h>
42 #include <sys/open.h>
43 #include <sys/user.h>
44 #include <sys/t_lock.h>
45 #include <sys/vm.h>
46 #include <sys/stat.h>
47 #include <vm/hat.h>
48 #include <vm/seg.h>
49 #include <vm/seg_vn.h>
50 #include <vm/seg_dev.h>
51 #include <vm/as.h>
52 #include <sys/cmn_err.h>
53 #include <sys/cpuvar.h>
54 #include <sys/debug.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunddi.h>
57 #include <sys/esunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/kstat.h>
60 #include <sys/conf.h>
61 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
62 #include <sys/ndi_impldefs.h>	/* include prototypes */
63 #include <sys/hwconf.h>
64 #include <sys/pathname.h>
65 #include <sys/modctl.h>
66 #include <sys/epm.h>
67 #include <sys/devctl.h>
68 #include <sys/callb.h>
69 #include <sys/cladm.h>
70 #include <sys/sysevent.h>
71 #include <sys/dacf_impl.h>
72 #include <sys/ddidevmap.h>
73 #include <sys/bootconf.h>
74 #include <sys/disp.h>
75 #include <sys/atomic.h>
76 #include <sys/promif.h>
77 #include <sys/instance.h>
78 #include <sys/sysevent/eventdefs.h>
79 #include <sys/task.h>
80 #include <sys/project.h>
81 #include <sys/taskq.h>
82 #include <sys/devpolicy.h>
83 #include <sys/ctype.h>
84 #include <net/if.h>
85 
86 extern	pri_t	minclsyspri;
87 
88 extern	rctl_hndl_t rc_project_devlockmem;
89 
90 #ifdef DEBUG
91 static int sunddi_debug = 0;
92 #endif /* DEBUG */
93 
94 /* ddi_umem_unlock miscellaneous */
95 
96 static	void	i_ddi_umem_unlock_thread_start(void);
97 
98 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
99 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
100 static	kthread_t	*ddi_umem_unlock_thread;
101 /*
102  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
103  */
104 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
105 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
106 
107 /*
108  * This lock protects the project.max-device-locked-memory counter.
 * When both p_lock (proc_t) and this lock need to be acquired, p_lock
110  * should be acquired first.
111  */
112 static kmutex_t umem_devlockmem_rctl_lock;
113 
114 
115 /*
116  * DDI(Sun) Function and flag definitions:
117  */
118 
119 #if defined(__x86)
120 /*
121  * Used to indicate which entries were chosen from a range.
122  */
123 char	*chosen_reg = "chosen-reg";
124 #endif
125 
126 /*
127  * Function used to ring system console bell
128  */
129 void (*ddi_console_bell_func)(clock_t duration);
130 
131 /*
132  * Creating register mappings and handling interrupts:
133  */
134 
135 /*
136  * Generic ddi_map: Call parent to fulfill request...
137  */
138 
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141     off_t len, caddr_t *addrp)
142 {
143 	dev_info_t *pdip;
144 
145 	ASSERT(dp);
146 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 	    dp, mp, offset, len, addrp));
149 }
150 
151 /*
152  * ddi_apply_range: (Called by nexi only.)
153  * Apply ranges in parent node dp, to child regspec rp...
154  */
155 
156 int
157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 {
159 	return (i_ddi_apply_range(dp, rdip, rp));
160 }
161 
162 int
163 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
164     off_t len)
165 {
166 	ddi_map_req_t mr;
167 #if defined(__x86)
168 	struct {
169 		int	bus;
170 		int	addr;
171 		int	size;
172 	} reg, *reglist;
173 	uint_t	length;
174 	int	rc;
175 
176 	/*
177 	 * get the 'registers' or the 'reg' property.
178 	 * We look up the reg property as an array of
179 	 * int's.
180 	 */
181 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
182 	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
183 	if (rc != DDI_PROP_SUCCESS)
184 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
185 		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
186 	if (rc == DDI_PROP_SUCCESS) {
187 		/*
188 		 * point to the required entry.
189 		 */
190 		reg = reglist[rnumber];
191 		reg.addr += offset;
192 		if (len != 0)
193 			reg.size = len;
194 		/*
195 		 * make a new property containing ONLY the required tuple.
196 		 */
197 		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
198 		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
199 		    != DDI_PROP_SUCCESS) {
200 			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
201 			    "property", DEVI(dip)->devi_name,
202 			    DEVI(dip)->devi_instance, chosen_reg);
203 		}
204 		/*
205 		 * free the memory allocated by
206 		 * ddi_prop_lookup_int_array ().
207 		 */
208 		ddi_prop_free((void *)reglist);
209 	}
210 #endif
211 	mr.map_op = DDI_MO_MAP_LOCKED;
212 	mr.map_type = DDI_MT_RNUMBER;
213 	mr.map_obj.rnumber = rnumber;
214 	mr.map_prot = PROT_READ | PROT_WRITE;
215 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
216 	mr.map_handlep = NULL;
217 	mr.map_vers = DDI_MAP_VERSION;
218 
219 	/*
220 	 * Call my parent to map in my regs.
221 	 */
222 
223 	return (ddi_map(dip, &mr, offset, len, kaddrp));
224 }
225 
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228     off_t len)
229 {
230 	ddi_map_req_t mr;
231 
232 	mr.map_op = DDI_MO_UNMAP;
233 	mr.map_type = DDI_MT_RNUMBER;
234 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
236 	mr.map_obj.rnumber = rnumber;
237 	mr.map_handlep = NULL;
238 	mr.map_vers = DDI_MAP_VERSION;
239 
240 	/*
241 	 * Call my parent to unmap my regs.
242 	 */
243 
244 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
245 	*kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250 
251 int
252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
253 	off_t offset, off_t len, caddr_t *vaddrp)
254 {
255 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
256 }
257 
258 /*
 * nullbusmap:	The DDI default bus_map entry point for nexi
260  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
261  *		with no HAT/MMU layer to be programmed at this level.
262  *
263  *		If the call is to map by rnumber, return an error,
264  *		otherwise pass anything else up the tree to my parent.
265  */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 	off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 	_NOTE(ARGUNUSED(rdip))
271 	if (mp->map_type == DDI_MT_RNUMBER)
272 		return (DDI_ME_UNSUPPORTED);
273 
274 	return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276 
277 /*
278  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279  *			   Only for use by nexi using the reg/range paradigm.
280  */
281 struct regspec *
282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
283 {
284 	return (i_ddi_rnumber_to_regspec(dip, rnumber));
285 }
286 
287 
288 /*
289  * Note that we allow the dip to be nil because we may be called
290  * prior even to the instantiation of the devinfo tree itself - all
291  * regular leaf and nexus drivers should always use a non-nil dip!
292  *
293  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294  * simply get a synchronous fault as soon as we touch a missing address.
295  *
296  * Poke is rather more carefully handled because we might poke to a write
297  * buffer, "succeed", then only find some time later that we got an
298  * asynchronous fault that indicated that the address we were writing to
299  * was not really backed by hardware.
300  */
301 
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304     void *addr, void *value_p)
305 {
306 	union {
307 		uint64_t	u64;
308 		uint32_t	u32;
309 		uint16_t	u16;
310 		uint8_t		u8;
311 	} peekpoke_value;
312 
313 	peekpoke_ctlops_t peekpoke_args;
314 	uint64_t dummy_result;
315 	int rval;
316 
317 	/* Note: size is assumed to be correct;  it is not checked. */
318 	peekpoke_args.size = size;
319 	peekpoke_args.dev_addr = (uintptr_t)addr;
320 	peekpoke_args.handle = NULL;
321 	peekpoke_args.repcount = 1;
322 	peekpoke_args.flags = 0;
323 
324 	if (cmd == DDI_CTLOPS_POKE) {
325 		switch (size) {
326 		case sizeof (uint8_t):
327 			peekpoke_value.u8 = *(uint8_t *)value_p;
328 			break;
329 		case sizeof (uint16_t):
330 			peekpoke_value.u16 = *(uint16_t *)value_p;
331 			break;
332 		case sizeof (uint32_t):
333 			peekpoke_value.u32 = *(uint32_t *)value_p;
334 			break;
335 		case sizeof (uint64_t):
336 			peekpoke_value.u64 = *(uint64_t *)value_p;
337 			break;
338 		}
339 	}
340 
341 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 
343 	if (devi != NULL)
344 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 		    &dummy_result);
346 	else
347 		rval = peekpoke_mem(cmd, &peekpoke_args);
348 
349 	/*
350 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 	 */
352 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 		switch (size) {
354 		case sizeof (uint8_t):
355 			*(uint8_t *)value_p = peekpoke_value.u8;
356 			break;
357 		case sizeof (uint16_t):
358 			*(uint16_t *)value_p = peekpoke_value.u16;
359 			break;
360 		case sizeof (uint32_t):
361 			*(uint32_t *)value_p = peekpoke_value.u32;
362 			break;
363 		case sizeof (uint64_t):
364 			*(uint64_t *)value_p = peekpoke_value.u64;
365 			break;
366 		}
367 	}
368 
369 	return (rval);
370 }
371 
372 /*
373  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375  */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 	switch (size) {
380 	case sizeof (uint8_t):
381 	case sizeof (uint16_t):
382 	case sizeof (uint32_t):
383 	case sizeof (uint64_t):
384 		break;
385 	default:
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391 
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 	switch (size) {
396 	case sizeof (uint8_t):
397 	case sizeof (uint16_t):
398 	case sizeof (uint32_t):
399 	case sizeof (uint64_t):
400 		break;
401 	default:
402 		return (DDI_FAILURE);
403 	}
404 
405 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407 
#ifdef _LP64
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
#else /* _ILP32 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
#endif
{
	/* 8-bit typed peek; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
419 
#ifdef _LP64
int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
#else /* _ILP32 */
int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
#endif
{
	/* 16-bit typed peek; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
431 
#ifdef _LP64
int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
#else /* _ILP32 */
int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
#endif
{
	/* 32-bit typed peek; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
443 
#ifdef _LP64
int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
#else /* _ILP32 */
int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
#endif
{
	/* 64-bit typed peek; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
455 
#ifdef _LP64
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
#else /* _ILP32 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
#endif
{
	/* 8-bit typed poke; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
466 
#ifdef _LP64
int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
#else /* _ILP32 */
int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
#endif
{
	/* 16-bit typed poke; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
477 
#ifdef _LP64
int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
#else /* _ILP32 */
int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
#endif
{
	/* 32-bit typed poke; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
488 
#ifdef _LP64
int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
#else /* _ILP32 */
int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
#endif
{
	/* 64-bit typed poke; delegates to the common peek/poke engine. */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
499 
500 /*
501  * ddi_peekpokeio() is used primarily by the mem drivers for moving
502  * data to and from uio structures via peek and poke.  Note that we
503  * use "internal" routines ddi_peek and ddi_poke to make this go
504  * slightly faster, avoiding the call overhead ..
505  */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* bounce buffer for 16/32/64-bit transfers */
	int8_t w8;		/* bounce byte for 8-bit transfers */
	size_t sz;		/* width chosen for this iteration */
	int o;

	/* Never transfer wider than the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/*
		 * An odd address or odd residual length forces a byte
		 * transfer for this iteration.
		 */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Choose the widest width (up to xfersize) that
			 * both the address and the remaining length are
			 * aligned to; each case falls through to the
			 * next narrower candidate.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			/* Move the datum between uio and the bounce buffer. */
			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
581 
582 /*
583  * These routines are used by drivers that do layered ioctls
584  * On sparc, they're implemented in assembler to avoid spilling
585  * register windows in the common (copyin) case ..
586  */
587 #if !defined(__sparc)
588 int
589 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
590 {
591 	if (flags & FKIOCTL)
592 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
593 	return (copyin(buf, kernbuf, size));
594 }
595 
596 int
597 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
598 {
599 	if (flags & FKIOCTL)
600 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
601 	return (copyout(buf, kernbuf, size));
602 }
603 #endif	/* !__sparc */
604 
605 /*
606  * Conversions in nexus pagesize units.  We don't duplicate the
607  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
608  * routines anyway.
609  */
610 unsigned long
611 ddi_btop(dev_info_t *dip, unsigned long bytes)
612 {
613 	unsigned long pages;
614 
615 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
616 	return (pages);
617 }
618 
619 unsigned long
620 ddi_btopr(dev_info_t *dip, unsigned long bytes)
621 {
622 	unsigned long pages;
623 
624 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
625 	return (pages);
626 }
627 
628 unsigned long
629 ddi_ptob(dev_info_t *dip, unsigned long pages)
630 {
631 	unsigned long bytes;
632 
633 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
634 	return (bytes);
635 }
636 
unsigned int
ddi_enter_critical(void)
{
	/*
	 * Raise to spl7 and return the previous priority level so the
	 * caller can restore it with ddi_exit_critical().
	 */
	return ((uint_t)spl7());
}
642 
void
ddi_exit_critical(unsigned int spl)
{
	/* Restore the priority level saved by ddi_enter_critical(). */
	splx((int)spl);
}
648 
649 /*
650  * Nexus ctlops punter
651  */
652 
653 #if !defined(__sparc)
654 /*
655  * Request bus_ctl parent to handle a bus_ctl request
656  *
657  * (The sparc version is in sparc_ddi.s)
658  */
659 int
660 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
661 {
662 	int (*fp)();
663 
664 	if (!d || !r)
665 		return (DDI_FAILURE);
666 
667 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
668 		return (DDI_FAILURE);
669 
670 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
671 	return ((*fp)(d, r, op, a, v));
672 }
673 
674 #endif
675 
676 /*
677  * DMA/DVMA setup
678  */
679 
#if defined(__sparc)
/*
 * Default DMA limits used when a caller passes a NULL limits pointer
 * to the old ddi_dma_*_setup() interfaces.
 */
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
/*
 * Default DMA limits for x86: ISA-compatible 24-bit addressing.
 */
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	/*
	 * Parenthesized for clarity: '+' binds tighter than '<<', so the
	 * old "0x86<<24+0" already evaluated as 0x86 << 24; this form
	 * produces the identical value without relying on precedence.
	 */
	((uint_t)0x86 << 24),	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
706 
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Substitute the default limits if the caller supplied none. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	/* On x86 the caller must always provide explicit limits. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
739 
740 int
741 ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
742     uint_t flags, int (*waitfp)(), caddr_t arg,
743     ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
744 {
745 	int (*funcp)() = ddi_dma_map;
746 	ddi_dma_lim_t dma_lim;
747 	struct ddi_dma_req dmareq;
748 	struct bus_ops *bop;
749 
750 	if (len == 0) {
751 		return (DDI_DMA_NOMAPPING);
752 	}
753 	if (limits == (ddi_dma_lim_t *)0) {
754 		dma_lim = standard_limits;
755 	} else {
756 		dma_lim = *limits;
757 	}
758 	dmareq.dmar_limits = &dma_lim;
759 	dmareq.dmar_flags = flags;
760 	dmareq.dmar_fp = waitfp;
761 	dmareq.dmar_arg = arg;
762 	dmareq.dmar_object.dmao_size = len;
763 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
764 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
765 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
766 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
767 
768 	/*
769 	 * Handle the case that the requester is both a leaf
770 	 * and a nexus driver simultaneously by calling the
771 	 * requester's bus_dma_map function directly instead
772 	 * of ddi_dma_map.
773 	 */
774 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
775 	if (bop && bop->bus_dma_map)
776 		funcp = bop->bus_dma_map;
777 
778 	return ((*funcp)(dip, dip, &dmareq, handlep));
779 }
780 
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* Substitute the default limits if the caller supplied none. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	/*
	 * A paged buffer (B_PAGEIO set, not remapped into the kernel)
	 * is described by its page list; anything else is described as
	 * a buffer virtual address object.
	 */
	if ((bp->b_flags & (B_PAGEIO|B_REMAPPED)) == B_PAGEIO) {
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		/* A shadow page list, if present, rides along in v_priv. */
		if ((bp->b_flags & (B_SHADOW|B_REMAPPED)) == B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if (bp->b_proc == NULL || bp->b_proc->p_as == &kas ||
		    (bp->b_flags & B_REMAPPED) != 0) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
845 
846 #if !defined(__sparc)
847 /*
848  * Request bus_dma_ctl parent to fiddle with a dma request.
849  *
850  * (The sparc version is in sparc_subr.s)
851  */
852 int
853 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
854     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
855     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
856 {
857 	int (*fp)();
858 
859 	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
860 	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
861 	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
862 }
863 #endif
864 
865 /*
866  * For all DMA control functions, call the DMA control
867  * routine and return status.
868  *
869  * Just plain assume that the parent is to be called.
870  * If a nexus driver or a thread outside the framework
871  * of a nexus driver or a leaf driver calls these functions,
872  * it is up to them to deal with the fact that the parent's
873  * bus_dma_ctl function will be the first one called.
874  */
875 
876 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
877 
int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	/* Obtain a kernel virtual address for part of the DMA object. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}
883 
int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	/* Convert a handle plus offset into a DMA cookie. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}
889 
int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	/* Convert a DMA cookie back into an offset within the object. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}
896 
int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	/* Shift a partial mapping to a new window of the object. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}
903 
int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	/* Report the current window; only valid for partial mappings. */
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}
911 
912 /*
913  * Note:  The astute might notice that in the next two routines
914  * the SPARC case passes a pointer to a ddi_dma_win_t as the 5th
915  * argument while the x86 case passes the ddi_dma_win_t directly.
916  *
917  * While it would be nice if the "correct" behavior was
918  * platform independent and specified someplace, it isn't.
919  * Until that point, what's required is that this call and
920  * the relevant bus nexus drivers agree, and in this case they
921  * do, at least for the cases I've looked at.
922  */
int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
	/*
	 * Advance to the next DMA window.  Per the block comment above,
	 * SPARC passes a pointer to 'win' as the 5th mctl argument while
	 * x86 passes the window value itself.
	 */
#if defined(__sparc)
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
#elif defined(__x86)
	/* x86 dispatches through the handle's cached mctl vector. */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_NEXTWIN,
		(off_t *)win, 0, (caddr_t *)nwin, 0));
#else
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN,
		(off_t *)win, 0, (caddr_t *)nwin, 0));
#endif
}
938 
int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	/*
	 * Advance to the next DMA segment within a window.  As with
	 * ddi_dma_nextwin(), SPARC passes pointers to win/seg while
	 * other platforms pass the values themselves.
	 */
#if defined(__sparc)
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
#else
	/* The segment records which handle it belongs to. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)
	    ((impl_dma_segment_t *)win)->dmais_hndl;

#if defined(__x86)
	/* x86 dispatches through the handle's cached mctl vector. */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_NEXTSEG,
		(off_t *)win, (size_t *)seg, (caddr_t *)nseg, 0));
#else
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG,
		(off_t *)win, (size_t *)seg, (caddr_t *)nseg, 0));
#endif
#endif
}
960 
961 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
962 /*
963  * This routine is Obsolete and should be removed from ALL architectures
964  * in a future release of Solaris.
965  *
966  * It is deliberately NOT ported to amd64; please fix the code that
967  * depends on this routine to use ddi_dma_nextcookie(9F).
968  */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	/*
	 * Obsolete: convert a DMA segment into a cookie plus
	 * offset/length.  New code should use ddi_dma_nextcookie(9F).
	 */
#if defined(__sparc)
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
#elif defined(__i386) && !defined(__amd64)
	ddi_dma_handle_t h = (ddi_dma_handle_t)
	    ((impl_dma_segment_t *)seg)->dmais_hndl;

	/*
	 * The hack used for i386 won't work here; we can't squeeze a
	 * pointer through the 'cache_flags' field.
	 */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_SEGTOC,
		o, (size_t *)l, (caddr_t *)cookiep, (uint_t)seg));
#endif
}
990 #endif	/* (__i386 && !__amd64) || __sparc */
991 
992 #if !defined(__sparc)
993 
994 /*
995  * The SPARC versions of these routines are done in assembler to
996  * save register windows, so they're in sparc_subr.s.
997  */
998 
999 int
1000 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
1001 	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
1002 {
1003 	dev_info_t	*hdip;
1004 	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
1005 	    ddi_dma_handle_t *);
1006 
1007 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;
1008 
1009 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
1010 	return ((*funcp)(hdip, rdip, dmareqp, handlep));
1011 }
1012 
1013 int
1014 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1015     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1016 {
1017 	dev_info_t	*hdip;
1018 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
1019 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
1020 
1021 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1022 
1023 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
1024 	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
1025 }
1026 
1027 int
1028 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
1029 {
1030 	dev_info_t	*hdip;
1031 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1032 
1033 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1034 
1035 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
1036 	return ((*funcp)(hdip, rdip, handlep));
1037 }
1038 
1039 int
1040 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1041     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1042     ddi_dma_cookie_t *cp, uint_t *ccountp)
1043 {
1044 	dev_info_t	*hdip;
1045 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1046 	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
1047 
1048 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
1049 
1050 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
1051 	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
1052 }
1053 
1054 int
1055 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1056     ddi_dma_handle_t handle)
1057 {
1058 	dev_info_t	*hdip;
1059 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1060 
1061 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
1062 
1063 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
1064 	return ((*funcp)(hdip, rdip, handle));
1065 }
1066 
1067 
1068 int
1069 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1070     ddi_dma_handle_t handle, off_t off, size_t len,
1071     uint_t cache_flags)
1072 {
1073 	dev_info_t	*hdip;
1074 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1075 	    off_t, size_t, uint_t);
1076 
1077 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1078 
1079 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
1080 	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
1081 }
1082 
1083 int
1084 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
1085     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1086     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1087 {
1088 	dev_info_t	*hdip;
1089 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1090 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
1091 
1092 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
1093 
1094 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
1095 	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
1096 	    cookiep, ccountp));
1097 }
1098 
1099 int
1100 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
1101 {
1102 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1103 	dev_info_t *hdip, *dip;
1104 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
1105 		size_t, uint_t);
1106 
1107 	/*
1108 	 * the DMA nexus driver will set DMP_NOSYNC if the
1109 	 * platform does not require any sync operation. For
1110 	 * example if the memory is uncached or consistent
1111 	 * and without any I/O write buffers involved.
1112 	 */
1113 	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1114 		return (DDI_SUCCESS);
1115 
1116 	dip = hp->dmai_rdip;
1117 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1118 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
1119 	return ((*funcp)(hdip, dip, h, o, l, whom));
1120 }
1121 
/*
 * ddi_dma_unbind_handle:	Unbind the memory object currently bound
 *				to DMA handle `h'.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;	/* original requester recorded in handle */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/*
	 * NOTE(review): unlike ddi_dma_unbindhdl() above, the function
	 * pointer comes from the requester's cached devi_bus_dma_unbindfunc
	 * rather than hdip's bus_ops — presumably the same routine cached
	 * to skip the bus_ops indirection; confirm against devinfo setup.
	 */
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}
1134 
1135 #endif	/* !__sparc */
1136 
/*
 * ddi_dma_free:	Release the DMA resources associated with handle
 *			`h' (legacy ddi_dma_map-era interface).
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
#if !defined(__x86)
	/* route the request through the generic DMA mctl entry point */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
#else
	/* x86: call the mctl routine cached in the handle itself */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_FREE,
		0, 0, 0, 0));
#endif
}
1147 
/*
 * ddi_iopb_alloc:	Allocate an I/O parameter block of `len' bytes
 *			suitable for DMA under the limits in *limp; the
 *			kernel address is returned through *iopbp.  When
 *			limp is NULL the system standard limits apply.
 */
int
ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
{
	ddi_dma_lim_t defalt;
	size_t size = len;

	if (!limp) {
		/* no caller-supplied limits; substitute the defaults */
		defalt = standard_limits;
		limp = &defalt;
	}
#if defined(__sparc)
	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
	    iopbp, NULL, NULL));
#else
	/* non-sparc: route through the DMA mctl IOPB allocation op */
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_IOPB_ALLOC, (off_t *)limp,
	    &size, iopbp, 0));
#endif
}
1166 
/*
 * ddi_iopb_free:	Release an IOPB obtained from ddi_iopb_alloc().
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, 0);
}
1172 
/*
 * ddi_mem_alloc:	Allocate `length' bytes of DMA-able kernel memory
 *			subject to *limits (system standard limits when
 *			NULL).  The address is returned via *kaddrp and
 *			the actual allocated size via *real_length.
 *			NOTE(review): only bit 0 of `flags' is consumed
 *			here — presumably a "can sleep" indicator; confirm
 *			against callers.
 */
int
ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
{
	ddi_dma_lim_t defalt;
	size_t size = length;

	if (!limits) {
		/* no caller-supplied limits; substitute the defaults */
		defalt = standard_limits;
		limits = &defalt;
	}
#if defined(__sparc)
	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
	    1, 0, kaddrp, real_length, NULL));
#else
	/* non-sparc: route through the DMA mctl streaming-memory op */
	return (ddi_dma_mctl(dip, dip, (ddi_dma_handle_t)real_length,
	    DDI_DMA_SMEM_ALLOC, (off_t *)limits, &size,
	    kaddrp, (flags & 0x1)));
#endif
}
1193 
/*
 * ddi_mem_free:	Release memory obtained from ddi_mem_alloc().
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, 1);
}
1199 
1200 /*
1201  * DMA attributes, alignment, burst sizes, and transfer minimums
1202  */
1203 int
1204 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1205 {
1206 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1207 
1208 	if (attrp == NULL)
1209 		return (DDI_FAILURE);
1210 	*attrp = dimp->dmai_attr;
1211 	return (DDI_SUCCESS);
1212 }
1213 
1214 int
1215 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1216 {
1217 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1218 
1219 	if (!dimp)
1220 		return (0);
1221 	else
1222 		return (dimp->dmai_burstsizes);
1223 }
1224 
1225 int
1226 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1227 {
1228 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1229 
1230 	if (!dimp || !alignment || !mineffect)
1231 		return (DDI_FAILURE);
1232 	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1233 		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1234 	} else {
1235 		if (dimp->dmai_burstsizes & 0xff0000) {
1236 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1237 		} else {
1238 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1239 		}
1240 	}
1241 	*mineffect = dimp->dmai_minxfer;
1242 	return (DDI_SUCCESS);
1243 }
1244 
1245 int
1246 ddi_iomin(dev_info_t *a, int i, int stream)
1247 {
1248 	int r;
1249 
1250 	/*
1251 	 * Make sure that the initial value is sane
1252 	 */
1253 	if (i & (i - 1))
1254 		return (0);
1255 	if (i == 0)
1256 		i = (stream) ? 4 : 1;
1257 
1258 	r = ddi_ctlops(a, a,
1259 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1260 	if (r != DDI_SUCCESS || (i & (i - 1)))
1261 		return (0);
1262 	return (i);
1263 }
1264 
1265 /*
1266  * Given two DMA attribute structures, apply the attributes
1267  * of one to the other, following the rules of attributes
1268  * and the wishes of the caller.
1269  *
1270  * The rules of DMA attribute structures are that you cannot
1271  * make things *less* restrictive as you apply one set
1272  * of attributes to another.
1273  *
1274  */
1275 void
1276 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
1277 {
1278 	attr->dma_attr_addr_lo =
1279 	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
1280 	attr->dma_attr_addr_hi =
1281 	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
1282 	attr->dma_attr_count_max =
1283 	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
1284 	attr->dma_attr_align =
1285 	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
1286 	attr->dma_attr_burstsizes =
1287 	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
1288 	attr->dma_attr_minxfer =
1289 	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
1290 	attr->dma_attr_maxxfer =
1291 	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
1292 	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
1293 	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
1294 	    (uint_t)mod->dma_attr_sgllen);
1295 	attr->dma_attr_granular =
1296 	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
1297 }
1298 
1299 /*
1300  * mmap/segmap interface:
1301  */
1302 
1303 /*
1304  * ddi_segmap:		setup the default segment driver. Calls the drivers
1305  *			XXmmap routine to validate the range to be mapped.
1306  *			Return ENXIO of the range is not valid.  Create
1307  *			a seg_dev segment that contains all of the
1308  *			necessary information and will reference the
1309  *			default segment driver routines. It returns zero
1310  *			on success or non-zero on failure.
1311  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* spec_segmap() is the specfs default segment-mapping routine */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* delegate the entire mapping job to specfs */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1322 
1323 /*
1324  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1325  *			drivers. Allows each successive parent to resolve
1326  *			address translations and add its mappings to the
1327  *			mapping list supplied in the page structure. It
1328  *			returns zero on success	or non-zero on failure.
1329  */
1330 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* dip serves as both the mapping and the requesting devinfo node */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1337 
1338 /*
1339  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1340  *	Invokes platform specific DDI to determine whether attributes specified
1341  *	in attr(9s) are	valid for the region of memory that will be made
1342  *	available for direct access to user process via the mmap(2) system call.
1343  */
1344 int
1345 ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
1346     uint_t rnumber, uint_t *hat_flags)
1347 {
1348 	ddi_acc_handle_t handle;
1349 	ddi_map_req_t mr;
1350 	ddi_acc_hdl_t *hp;
1351 	int result;
1352 	dev_info_t *dip;
1353 
1354 	/*
1355 	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
1356 	 * release it immediately since it should already be held by
1357 	 * a devfs vnode.
1358 	 */
1359 	if ((dip =
1360 	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
1361 		return (-1);
1362 	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */
1363 
1364 	/*
1365 	 * Allocate and initialize the common elements of data
1366 	 * access handle.
1367 	 */
1368 	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1369 	if (handle == NULL)
1370 		return (-1);
1371 
1372 	hp = impl_acc_hdl_get(handle);
1373 	hp->ah_vers = VERS_ACCHDL;
1374 	hp->ah_dip = dip;
1375 	hp->ah_rnumber = rnumber;
1376 	hp->ah_offset = 0;
1377 	hp->ah_len = 0;
1378 	hp->ah_acc = *accattrp;
1379 
1380 	/*
1381 	 * Set up the mapping request and call to parent.
1382 	 */
1383 	mr.map_op = DDI_MO_MAP_HANDLE;
1384 	mr.map_type = DDI_MT_RNUMBER;
1385 	mr.map_obj.rnumber = rnumber;
1386 	mr.map_prot = PROT_READ | PROT_WRITE;
1387 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1388 	mr.map_handlep = hp;
1389 	mr.map_vers = DDI_MAP_VERSION;
1390 	result = ddi_map(dip, &mr, 0, 0, NULL);
1391 
1392 	/*
1393 	 * Region must be mappable, pick up flags from the framework.
1394 	 */
1395 	*hat_flags = hp->ah_hat_flags;
1396 
1397 	impl_acc_hdl_free(handle);
1398 
1399 	/*
1400 	 * check for end result.
1401 	 */
1402 	if (result != DDI_SUCCESS)
1403 		return (-1);
1404 	return (0);
1405 }
1406 
1407 
1408 /*
1409  * Property functions:	 See also, ddipropdefs.h.
1410  *
1411  * These functions are the framework for the property functions,
1412  * i.e. they support software defined properties.  All implementation
1413  * specific property handling (i.e.: self-identifying devices and
1414  * PROM defined properties are handled in the implementation specific
1415  * functions (defined in ddi_implfuncs.h).
1416  */
1417 
1418 /*
1419  * nopropop:	Shouldn't be called, right?
1420  */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* stub prop_op(9E): every argument is ignored */
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1428 
1429 #ifdef	DDI_PROP_DEBUG
1430 int ddi_prop_debug_flag = 0;
1431 
1432 int
1433 ddi_prop_debug(int enable)
1434 {
1435 	int prev = ddi_prop_debug_flag;
1436 
1437 	if ((enable != 0) || (prev != 0))
1438 		printf("ddi_prop_debug: debugging %s\n",
1439 		    enable ? "enabled" : "disabled");
1440 	ddi_prop_debug_flag = enable;
1441 	return (prev);
1442 }
1443 
1444 #endif	/* DDI_PROP_DEBUG */
1445 
1446 /*
1447  * Search a property list for a match, if found return pointer
1448  * to matching prop struct, else return NULL.
1449  */
1450 
1451 ddi_prop_t *
1452 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1453 {
1454 	ddi_prop_t	*propp;
1455 
1456 	/*
1457 	 * find the property in child's devinfo:
1458 	 * Search order defined by this search function is first matching
1459 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1460 	 * dev == propp->prop_dev, name == propp->name, and the correct
1461 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1462 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1463 	 */
1464 	if (dev == DDI_DEV_T_NONE)
1465 		dev = DDI_DEV_T_ANY;
1466 
1467 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1468 
1469 		if (!DDI_STRSAME(propp->prop_name, name))
1470 			continue;
1471 
1472 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1473 			continue;
1474 
1475 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1476 			continue;
1477 
1478 		return (propp);
1479 	}
1480 
1481 	return ((ddi_prop_t *)0);
1482 }
1483 
1484 /*
1485  * Search for property within devnames structures
1486  */
1487 ddi_prop_t *
1488 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1489 {
1490 	major_t		major;
1491 	struct devnames	*dnp;
1492 	ddi_prop_t	*propp;
1493 
1494 	/*
1495 	 * Valid dev_t value is needed to index into the
1496 	 * correct devnames entry, therefore a dev_t
1497 	 * value of DDI_DEV_T_ANY is not appropriate.
1498 	 */
1499 	ASSERT(dev != DDI_DEV_T_ANY);
1500 	if (dev == DDI_DEV_T_ANY) {
1501 		return ((ddi_prop_t *)0);
1502 	}
1503 
1504 	major = getmajor(dev);
1505 	dnp = &(devnamesp[major]);
1506 
1507 	if (dnp->dn_global_prop_ptr == NULL)
1508 		return ((ddi_prop_t *)0);
1509 
1510 	LOCK_DEV_OPS(&dnp->dn_lock);
1511 
1512 	for (propp = dnp->dn_global_prop_ptr->prop_list;
1513 	    propp != NULL;
1514 	    propp = (ddi_prop_t *)propp->prop_next) {
1515 
1516 		if (!DDI_STRSAME(propp->prop_name, name))
1517 			continue;
1518 
1519 		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1520 			continue;
1521 
1522 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1523 			continue;
1524 
1525 		/* Property found, return it */
1526 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1527 		return (propp);
1528 	}
1529 
1530 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1531 	return ((ddi_prop_t *)0);
1532 }
1533 
1534 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1535 
1536 /*
1537  * ddi_prop_search_global:
1538  *	Search the global property list within devnames
1539  *	for the named property.  Return the encoded value.
1540  */
1541 static int
1542 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1543     void *valuep, uint_t *lengthp)
1544 {
1545 	ddi_prop_t	*propp;
1546 	caddr_t		buffer;
1547 
1548 	propp =  i_ddi_search_global_prop(dev, name, flags);
1549 
1550 	/* Property NOT found, bail */
1551 	if (propp == (ddi_prop_t *)0)
1552 		return (DDI_PROP_NOT_FOUND);
1553 
1554 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1555 		return (DDI_PROP_UNDEFINED);
1556 
1557 	if ((buffer = kmem_alloc(propp->prop_len,
1558 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1559 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1560 		return (DDI_PROP_NO_MEMORY);
1561 	}
1562 
1563 	/*
1564 	 * Return the encoded data
1565 	 */
1566 	*(caddr_t *)valuep = buffer;
1567 	*lengthp = propp->prop_len;
1568 	bcopy(propp->prop_val, buffer, propp->prop_len);
1569 
1570 	return (DDI_PROP_SUCCESS);
1571 }
1572 
1573 /*
1574  * ddi_prop_search_common:	Lookup and return the encoded value
1575  */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* buffer from a prior sleeping alloc */
	int		plength = 0;		/* size of prealloc, for kmem_free */
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		/* per-node software property lists are guarded by devi_lock */
		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					/*
					 * The lock is dropped for the
					 * sleeping allocation, so the
					 * property may change; re-search
					 * from the top of the loop.
					 */
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* caller's buffer must hold the whole value */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if ((i_ddi_node_state(pdip) == DS_READY) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend one level and repeat the search at the parent */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1803 
1804 
1805 /*
1806  * ddi_prop_op: The basic property operator for drivers.
1807  *
1808  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1809  *
1810  *	prop_op			valuep
1811  *	------			------
1812  *
1813  *	PROP_LEN		<unused>
1814  *
1815  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1816  *
1817  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1818  *				address of allocated buffer, if successful)
1819  */
1820 int
1821 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1822     char *name, caddr_t valuep, int *lengthp)
1823 {
1824 	int	i;
1825 
1826 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1827 
1828 	/*
1829 	 * If this was originally an LDI prop lookup then we bail here.
1830 	 * The reason is that the LDI property lookup interfaces first call
1831 	 * a drivers prop_op() entry point to allow it to override
1832 	 * properties.  But if we've made it here, then the driver hasn't
1833 	 * overriden any properties.  We don't want to continue with the
1834 	 * property search here because we don't have any type inforamtion.
1835 	 * When we return failure, the LDI interfaces will then proceed to
1836 	 * call the typed property interfaces to look up the property.
1837 	 */
1838 	if (mod_flags & DDI_PROP_DYNAMIC)
1839 		return (DDI_PROP_NOT_FOUND);
1840 
1841 	/*
1842 	 * check for pre-typed property consumer asking for typed property:
1843 	 * see e_ddi_getprop_int64.
1844 	 */
1845 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1846 		mod_flags |= DDI_PROP_TYPE_INT64;
1847 	mod_flags |= DDI_PROP_TYPE_ANY;
1848 
1849 	i = ddi_prop_search_common(dev, dip, prop_op,
1850 		mod_flags, name, valuep, (uint_t *)lengthp);
1851 	if (i == DDI_PROP_FOUND_1275)
1852 		return (DDI_PROP_SUCCESS);
1853 	return (i);
1854 }
1855 
1856 /*
1857  * ddi_prop_op_nblocks: The basic property operator for drivers that maintain
1858  * size in number of DEV_BSIZE blocks.  Provides a dynamic property
1859  * implementation for size oriented properties based on nblocks64 values passed
1860  * in by the driver.  Fallback to ddi_prop_op if the nblocks64 is too large.
1861  * This interface should not be used with a nblocks64 that represents the
1862  * driver's idea of how to represent unknown, if nblocks is unknown use
1863  * ddi_prop_op.
1864  */
1865 int
1866 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1867     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1868 {
1869 	uint64_t size64;
1870 
1871 	/*
1872 	 * There is no point in supporting nblocks64 values that don't have
1873 	 * an accurate uint64_t byte count representation.
1874 	 */
1875 	if (nblocks64 >= (UINT64_MAX >> DEV_BSHIFT))
1876 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1877 		    name, valuep, lengthp));
1878 
1879 	size64 = nblocks64 << DEV_BSHIFT;
1880 	return (ddi_prop_op_size(dev, dip, prop_op, mod_flags,
1881 	    name, valuep, lengthp, size64));
1882 }
1883 
1884 /*
1885  * ddi_prop_op_size: The basic property operator for drivers that maintain size
 * in bytes. Provides a dynamic property implementation for size oriented
1887  * properties based on size64 values passed in by the driver.  Fallback to
1888  * ddi_prop_op if the size64 is too large. This interface should not be used
1889  * with a size64 that represents the driver's idea of how to represent unknown,
1890  * if size is unknown use ddi_prop_op.
1891  *
1892  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1893  * integers. While the most likely interface to request them ([bc]devi_size)
1894  * is declared int (signed) there is no enforcement of this, which means we
1895  * can't enforce limitations here without risking regression.
1896  */
1897 int
1898 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1899     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1900 {
1901 	uint64_t nblocks64;
1902 	int	callers_length;
1903 	caddr_t	buffer;
1904 
1905 	/* compute DEV_BSIZE nblocks value */
1906 	nblocks64 = lbtodb(size64);
1907 
1908 	/* get callers length, establish length of our dynamic properties */
1909 	callers_length = *lengthp;
1910 
1911 	if (strcmp(name, "Nblocks") == 0)
1912 		*lengthp = sizeof (uint64_t);
1913 	else if (strcmp(name, "Size") == 0)
1914 		*lengthp = sizeof (uint64_t);
1915 	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1916 		*lengthp = sizeof (uint32_t);
1917 	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1918 		*lengthp = sizeof (uint32_t);
1919 	else {
1920 		/* fallback to ddi_prop_op */
1921 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1922 		    name, valuep, lengthp));
1923 	}
1924 
1925 	/* service request for the length of the property */
1926 	if (prop_op == PROP_LEN)
1927 		return (DDI_PROP_SUCCESS);
1928 
1929 	/* the length of the property and the request must match */
1930 	if (callers_length != *lengthp)
1931 		return (DDI_PROP_INVAL_ARG);
1932 
1933 	switch (prop_op) {
1934 	case PROP_LEN_AND_VAL_ALLOC:
1935 		if ((buffer = kmem_alloc(*lengthp,
1936 		    (mod_flags & DDI_PROP_CANSLEEP) ?
1937 		    KM_SLEEP : KM_NOSLEEP)) == NULL)
1938 			return (DDI_PROP_NO_MEMORY);
1939 
1940 		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
1941 		break;
1942 
1943 	case PROP_LEN_AND_VAL_BUF:
1944 		buffer = valuep;		/* get callers buf ptr */
1945 		break;
1946 
1947 	default:
1948 		return (DDI_PROP_INVAL_ARG);
1949 	}
1950 
1951 	/* transfer the value into the buffer */
1952 	if (strcmp(name, "Nblocks") == 0)
1953 		*((uint64_t *)buffer) = nblocks64;
1954 	else if (strcmp(name, "Size") == 0)
1955 		*((uint64_t *)buffer) = size64;
1956 	else if (strcmp(name, "nblocks") == 0)
1957 		*((uint32_t *)buffer) = (uint32_t)nblocks64;
1958 	else if (strcmp(name, "size") == 0)
1959 		*((uint32_t *)buffer) = (uint32_t)size64;
1960 	return (DDI_PROP_SUCCESS);
1961 }
1962 
1963 /*
1964  * Variable length props...
1965  */
1966 
1967 /*
1968  * ddi_getlongprop:	Get variable length property len+val into a buffer
1969  *		allocated by property provider via kmem_alloc. Requester
1970  *		is responsible for freeing returned property via kmem_free.
1971  *
1972  *	Arguments:
1973  *
1974  *	dev_t:	Input:	dev_t of property.
1975  *	dip:	Input:	dev_info_t pointer of child.
1976  *	flags:	Input:	Possible flag modifiers are:
1977  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1978  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1979  *	name:	Input:	name of property.
1980  *	valuep:	Output:	Addr of callers buffer pointer.
1981  *	lengthp:Output:	*lengthp will contain prop length on exit.
1982  *
1983  *	Possible Returns:
1984  *
1985  *		DDI_PROP_SUCCESS:	Prop found and returned.
1986  *		DDI_PROP_NOT_FOUND:	Prop not found
1987  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1988  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1989  */
1990 
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* provider allocates; caller must kmem_free the returned buffer */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1998 
1999 /*
2000  *
2001  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2002  *				buffer. (no memory allocation by provider).
2003  *
2004  *	dev_t:	Input:	dev_t of property.
2005  *	dip:	Input:	dev_info_t pointer of child.
2006  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2007  *	name:	Input:	name of property
2008  *	valuep:	Input:	ptr to callers buffer.
2009  *	lengthp:I/O:	ptr to length of callers buffer on entry,
2010  *			actual length of property on exit.
2011  *
2012  *	Possible returns:
2013  *
2014  *		DDI_PROP_SUCCESS	Prop found and returned
2015  *		DDI_PROP_NOT_FOUND	Prop not found
2016  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2017  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2018  *					no value returned, but actual prop
2019  *					length returned in *lengthp
2020  *
2021  */
2022 
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* value is copied into the caller's buffer; no allocation here */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
2030 
2031 /*
2032  * Integer/boolean sized props.
2033  *
2034  * Call is value only... returns found boolean or int sized prop value or
2035  * defvalue if prop not found or is wrong length or is explicitly undefined.
2036  * Only flag is DDI_PROP_DONTPASS...
2037  *
2038  * By convention, this interface returns boolean (0) sized properties
2039  * as value (int)1.
2040  *
2041  * This never returns an error, if property not found or specifically
2042  * undefined, the input `defvalue' is returned.
2043  */
2044 
2045 int
2046 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2047 {
2048 	int	propvalue = defvalue;
2049 	int	proplength = sizeof (int);
2050 	int	error;
2051 
2052 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2053 	    flags, name, (caddr_t)&propvalue, &proplength);
2054 
2055 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2056 		propvalue = 1;
2057 
2058 	return (propvalue);
2059 }
2060 
2061 /*
2062  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2063  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2064  */
2065 
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	/* length-only query; no value buffer is needed */
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
2071 
2072 /*
2073  * Allocate a struct prop_driver_data, along with 'size' bytes
2074  * for decoded property data.  This structure is freed by
2075  * calling ddi_prop_free(9F).
2076  */
2077 static void *
2078 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2079 {
2080 	struct prop_driver_data *pdd;
2081 
2082 	/*
2083 	 * Allocate a structure with enough memory to store the decoded data.
2084 	 */
2085 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2086 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2087 	pdd->pdd_prop_free = prop_free;
2088 
2089 	/*
2090 	 * Return a pointer to the location to put the decoded data.
2091 	 */
2092 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2093 }
2094 
2095 /*
2096  * Allocated the memory needed to store the encoded data in the property
2097  * handle.
2098  */
2099 static int
2100 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2101 {
2102 	/*
2103 	 * If size is zero, then set data to NULL and size to 0.  This
2104 	 * is a boolean property.
2105 	 */
2106 	if (size == 0) {
2107 		ph->ph_size = 0;
2108 		ph->ph_data = NULL;
2109 		ph->ph_cur_pos = NULL;
2110 		ph->ph_save_pos = NULL;
2111 	} else {
2112 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2113 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2114 			if (ph->ph_data == NULL)
2115 				return (DDI_PROP_NO_MEMORY);
2116 		} else
2117 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2118 		ph->ph_size = size;
2119 		ph->ph_cur_pos = ph->ph_data;
2120 		ph->ph_save_pos = ph->ph_data;
2121 	}
2122 	return (DDI_PROP_SUCCESS);
2123 }
2124 
2125 /*
2126  * Free the space allocated by the lookup routines.  Each lookup routine
2127  * returns a pointer to the decoded data to the driver.  The driver then
2128  * passes this pointer back to us.  This data actually lives in a struct
2129  * prop_driver_data.  We use negative indexing to find the beginning of
2130  * the structure and then free the entire structure using the size and
2131  * the free routine stored in the structure.
2132  */
2133 void
2134 ddi_prop_free(void *datap)
2135 {
2136 	struct prop_driver_data *pdd;
2137 
2138 	/*
2139 	 * Get the structure
2140 	 */
2141 	pdd = (struct prop_driver_data *)
2142 		((caddr_t)datap - sizeof (struct prop_driver_data));
2143 	/*
2144 	 * Call the free routine to free it
2145 	 */
2146 	(*pdd->pdd_prop_free)(pdd);
2147 }
2148 
2149 /*
2150  * Free the data associated with an array of ints,
2151  * allocated with ddi_prop_decode_alloc().
2152  */
2153 static void
2154 ddi_prop_free_ints(struct prop_driver_data *pdd)
2155 {
2156 	kmem_free(pdd, pdd->pdd_size);
2157 }
2158 
2159 /*
2160  * Free a single string property or a single string contained within
2161  * the argv style return value of an array of strings.
2162  */
2163 static void
2164 ddi_prop_free_string(struct prop_driver_data *pdd)
2165 {
2166 	kmem_free(pdd, pdd->pdd_size);
2167 
2168 }
2169 
2170 /*
2171  * Free an array of strings.
2172  */
2173 static void
2174 ddi_prop_free_strings(struct prop_driver_data *pdd)
2175 {
2176 	kmem_free(pdd, pdd->pdd_size);
2177 }
2178 
2179 /*
2180  * Free the data associated with an array of bytes.
2181  */
2182 static void
2183 ddi_prop_free_bytes(struct prop_driver_data *pdd)
2184 {
2185 	kmem_free(pdd, pdd->pdd_size);
2186 }
2187 
2188 /*
2189  * Reset the current location pointer in the property handle to the
2190  * beginning of the data.
2191  */
2192 void
2193 ddi_prop_reset_pos(prop_handle_t *ph)
2194 {
2195 	ph->ph_cur_pos = ph->ph_data;
2196 	ph->ph_save_pos = ph->ph_data;
2197 }
2198 
2199 /*
2200  * Restore the current location pointer in the property handle to the
2201  * saved position.
2202  */
2203 void
2204 ddi_prop_save_pos(prop_handle_t *ph)
2205 {
2206 	ph->ph_save_pos = ph->ph_cur_pos;
2207 }
2208 
2209 /*
2210  * Save the location that the current location pointer is pointing to..
2211  */
2212 void
2213 ddi_prop_restore_pos(prop_handle_t *ph)
2214 {
2215 	ph->ph_cur_pos = ph->ph_save_pos;
2216 }
2217 
2218 /*
2219  * Property encode/decode functions
2220  */
2221 
2222 /*
2223  * Decode a single integer property
2224  */
2225 static int
2226 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2227 {
2228 	int	i;
2229 	int	tmp;
2230 
2231 	/*
2232 	 * If there is nothing to decode return an error
2233 	 */
2234 	if (ph->ph_size == 0)
2235 		return (DDI_PROP_END_OF_DATA);
2236 
2237 	/*
2238 	 * Decode the property as a single integer and return it
2239 	 * in data if we were able to decode it.
2240 	 */
2241 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2242 	if (i < DDI_PROP_RESULT_OK) {
2243 		switch (i) {
2244 		case DDI_PROP_RESULT_EOF:
2245 			return (DDI_PROP_END_OF_DATA);
2246 
2247 		case DDI_PROP_RESULT_ERROR:
2248 			return (DDI_PROP_CANNOT_DECODE);
2249 		}
2250 	}
2251 
2252 	*(int *)data = tmp;
2253 	*nelements = 1;
2254 	return (DDI_PROP_SUCCESS);
2255 }
2256 
2257 /*
2258  * Decode a single 64 bit integer property
2259  */
2260 static int
2261 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2262 {
2263 	int	i;
2264 	int64_t	tmp;
2265 
2266 	/*
2267 	 * If there is nothing to decode return an error
2268 	 */
2269 	if (ph->ph_size == 0)
2270 		return (DDI_PROP_END_OF_DATA);
2271 
2272 	/*
2273 	 * Decode the property as a single integer and return it
2274 	 * in data if we were able to decode it.
2275 	 */
2276 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2277 	if (i < DDI_PROP_RESULT_OK) {
2278 		switch (i) {
2279 		case DDI_PROP_RESULT_EOF:
2280 			return (DDI_PROP_END_OF_DATA);
2281 
2282 		case DDI_PROP_RESULT_ERROR:
2283 			return (DDI_PROP_CANNOT_DECODE);
2284 		}
2285 	}
2286 
2287 	*(int64_t *)data = tmp;
2288 	*nelements = 1;
2289 	return (DDI_PROP_SUCCESS);
2290 }
2291 
2292 /*
2293  * Decode an array of integers property
2294  */
2295 static int
2296 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2297 {
2298 	int	i;
2299 	int	cnt = 0;
2300 	int	*tmp;
2301 	int	*intp;
2302 	int	n;
2303 
2304 	/*
2305 	 * Figure out how many array elements there are by going through the
2306 	 * data without decoding it first and counting.
2307 	 */
2308 	for (;;) {
2309 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2310 		if (i < 0)
2311 			break;
2312 		cnt++;
2313 	}
2314 
2315 	/*
2316 	 * If there are no elements return an error
2317 	 */
2318 	if (cnt == 0)
2319 		return (DDI_PROP_END_OF_DATA);
2320 
2321 	/*
2322 	 * If we cannot skip through the data, we cannot decode it
2323 	 */
2324 	if (i == DDI_PROP_RESULT_ERROR)
2325 		return (DDI_PROP_CANNOT_DECODE);
2326 
2327 	/*
2328 	 * Reset the data pointer to the beginning of the encoded data
2329 	 */
2330 	ddi_prop_reset_pos(ph);
2331 
2332 	/*
2333 	 * Allocated memory to store the decoded value in.
2334 	 */
2335 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2336 		ddi_prop_free_ints);
2337 
2338 	/*
2339 	 * Decode each element and place it in the space we just allocated
2340 	 */
2341 	tmp = intp;
2342 	for (n = 0; n < cnt; n++, tmp++) {
2343 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2344 		if (i < DDI_PROP_RESULT_OK) {
2345 			/*
2346 			 * Free the space we just allocated
2347 			 * and return an error.
2348 			 */
2349 			ddi_prop_free(intp);
2350 			switch (i) {
2351 			case DDI_PROP_RESULT_EOF:
2352 				return (DDI_PROP_END_OF_DATA);
2353 
2354 			case DDI_PROP_RESULT_ERROR:
2355 				return (DDI_PROP_CANNOT_DECODE);
2356 			}
2357 		}
2358 	}
2359 
2360 	*nelements = cnt;
2361 	*(int **)data = intp;
2362 
2363 	return (DDI_PROP_SUCCESS);
2364 }
2365 
2366 /*
2367  * Decode a 64 bit integer array property
2368  */
2369 static int
2370 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2371 {
2372 	int	i;
2373 	int	n;
2374 	int	cnt = 0;
2375 	int64_t	*tmp;
2376 	int64_t	*intp;
2377 
2378 	/*
2379 	 * Count the number of array elements by going
2380 	 * through the data without decoding it.
2381 	 */
2382 	for (;;) {
2383 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2384 		if (i < 0)
2385 			break;
2386 		cnt++;
2387 	}
2388 
2389 	/*
2390 	 * If there are no elements return an error
2391 	 */
2392 	if (cnt == 0)
2393 		return (DDI_PROP_END_OF_DATA);
2394 
2395 	/*
2396 	 * If we cannot skip through the data, we cannot decode it
2397 	 */
2398 	if (i == DDI_PROP_RESULT_ERROR)
2399 		return (DDI_PROP_CANNOT_DECODE);
2400 
2401 	/*
2402 	 * Reset the data pointer to the beginning of the encoded data
2403 	 */
2404 	ddi_prop_reset_pos(ph);
2405 
2406 	/*
2407 	 * Allocate memory to store the decoded value.
2408 	 */
2409 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2410 		ddi_prop_free_ints);
2411 
2412 	/*
2413 	 * Decode each element and place it in the space allocated
2414 	 */
2415 	tmp = intp;
2416 	for (n = 0; n < cnt; n++, tmp++) {
2417 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2418 		if (i < DDI_PROP_RESULT_OK) {
2419 			/*
2420 			 * Free the space we just allocated
2421 			 * and return an error.
2422 			 */
2423 			ddi_prop_free(intp);
2424 			switch (i) {
2425 			case DDI_PROP_RESULT_EOF:
2426 				return (DDI_PROP_END_OF_DATA);
2427 
2428 			case DDI_PROP_RESULT_ERROR:
2429 				return (DDI_PROP_CANNOT_DECODE);
2430 			}
2431 		}
2432 	}
2433 
2434 	*nelements = cnt;
2435 	*(int64_t **)data = intp;
2436 
2437 	return (DDI_PROP_SUCCESS);
2438 }
2439 
2440 /*
2441  * Encode an array of integers property (Can be one element)
2442  */
2443 int
2444 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2445 {
2446 	int	i;
2447 	int	*tmp;
2448 	int	cnt;
2449 	int	size;
2450 
2451 	/*
2452 	 * If there is no data, we cannot do anything
2453 	 */
2454 	if (nelements == 0)
2455 		return (DDI_PROP_CANNOT_ENCODE);
2456 
2457 	/*
2458 	 * Get the size of an encoded int.
2459 	 */
2460 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2461 
2462 	if (size < DDI_PROP_RESULT_OK) {
2463 		switch (size) {
2464 		case DDI_PROP_RESULT_EOF:
2465 			return (DDI_PROP_END_OF_DATA);
2466 
2467 		case DDI_PROP_RESULT_ERROR:
2468 			return (DDI_PROP_CANNOT_ENCODE);
2469 		}
2470 	}
2471 
2472 	/*
2473 	 * Allocate space in the handle to store the encoded int.
2474 	 */
2475 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2476 		DDI_PROP_SUCCESS)
2477 		return (DDI_PROP_NO_MEMORY);
2478 
2479 	/*
2480 	 * Encode the array of ints.
2481 	 */
2482 	tmp = (int *)data;
2483 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2484 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2485 		if (i < DDI_PROP_RESULT_OK) {
2486 			switch (i) {
2487 			case DDI_PROP_RESULT_EOF:
2488 				return (DDI_PROP_END_OF_DATA);
2489 
2490 			case DDI_PROP_RESULT_ERROR:
2491 				return (DDI_PROP_CANNOT_ENCODE);
2492 			}
2493 		}
2494 	}
2495 
2496 	return (DDI_PROP_SUCCESS);
2497 }
2498 
2499 
2500 /*
2501  * Encode a 64 bit integer array property
2502  */
2503 int
2504 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2505 {
2506 	int i;
2507 	int cnt;
2508 	int size;
2509 	int64_t *tmp;
2510 
2511 	/*
2512 	 * If there is no data, we cannot do anything
2513 	 */
2514 	if (nelements == 0)
2515 		return (DDI_PROP_CANNOT_ENCODE);
2516 
2517 	/*
2518 	 * Get the size of an encoded 64 bit int.
2519 	 */
2520 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2521 
2522 	if (size < DDI_PROP_RESULT_OK) {
2523 		switch (size) {
2524 		case DDI_PROP_RESULT_EOF:
2525 			return (DDI_PROP_END_OF_DATA);
2526 
2527 		case DDI_PROP_RESULT_ERROR:
2528 			return (DDI_PROP_CANNOT_ENCODE);
2529 		}
2530 	}
2531 
2532 	/*
2533 	 * Allocate space in the handle to store the encoded int.
2534 	 */
2535 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2536 	    DDI_PROP_SUCCESS)
2537 		return (DDI_PROP_NO_MEMORY);
2538 
2539 	/*
2540 	 * Encode the array of ints.
2541 	 */
2542 	tmp = (int64_t *)data;
2543 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2544 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2545 		if (i < DDI_PROP_RESULT_OK) {
2546 			switch (i) {
2547 			case DDI_PROP_RESULT_EOF:
2548 				return (DDI_PROP_END_OF_DATA);
2549 
2550 			case DDI_PROP_RESULT_ERROR:
2551 				return (DDI_PROP_CANNOT_ENCODE);
2552 			}
2553 		}
2554 	}
2555 
2556 	return (DDI_PROP_SUCCESS);
2557 }
2558 
2559 /*
2560  * Decode a single string property
2561  */
2562 static int
2563 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2564 {
2565 	char		*tmp;
2566 	char		*str;
2567 	int		i;
2568 	int		size;
2569 
2570 	/*
2571 	 * If there is nothing to decode return an error
2572 	 */
2573 	if (ph->ph_size == 0)
2574 		return (DDI_PROP_END_OF_DATA);
2575 
2576 	/*
2577 	 * Get the decoded size of the encoded string.
2578 	 */
2579 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2580 	if (size < DDI_PROP_RESULT_OK) {
2581 		switch (size) {
2582 		case DDI_PROP_RESULT_EOF:
2583 			return (DDI_PROP_END_OF_DATA);
2584 
2585 		case DDI_PROP_RESULT_ERROR:
2586 			return (DDI_PROP_CANNOT_DECODE);
2587 		}
2588 	}
2589 
2590 	/*
2591 	 * Allocated memory to store the decoded value in.
2592 	 */
2593 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2594 
2595 	ddi_prop_reset_pos(ph);
2596 
2597 	/*
2598 	 * Decode the str and place it in the space we just allocated
2599 	 */
2600 	tmp = str;
2601 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2602 	if (i < DDI_PROP_RESULT_OK) {
2603 		/*
2604 		 * Free the space we just allocated
2605 		 * and return an error.
2606 		 */
2607 		ddi_prop_free(str);
2608 		switch (i) {
2609 		case DDI_PROP_RESULT_EOF:
2610 			return (DDI_PROP_END_OF_DATA);
2611 
2612 		case DDI_PROP_RESULT_ERROR:
2613 			return (DDI_PROP_CANNOT_DECODE);
2614 		}
2615 	}
2616 
2617 	*(char **)data = str;
2618 	*nelements = 1;
2619 
2620 	return (DDI_PROP_SUCCESS);
2621 }
2622 
2623 /*
2624  * Decode an array of strings.
2625  */
2626 int
2627 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2628 {
2629 	int		cnt = 0;
2630 	char		**strs;
2631 	char		**tmp;
2632 	char		*ptr;
2633 	int		i;
2634 	int		n;
2635 	int		size;
2636 	size_t		nbytes;
2637 
2638 	/*
2639 	 * Figure out how many array elements there are by going through the
2640 	 * data without decoding it first and counting.
2641 	 */
2642 	for (;;) {
2643 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2644 		if (i < 0)
2645 			break;
2646 		cnt++;
2647 	}
2648 
2649 	/*
2650 	 * If there are no elements return an error
2651 	 */
2652 	if (cnt == 0)
2653 		return (DDI_PROP_END_OF_DATA);
2654 
2655 	/*
2656 	 * If we cannot skip through the data, we cannot decode it
2657 	 */
2658 	if (i == DDI_PROP_RESULT_ERROR)
2659 		return (DDI_PROP_CANNOT_DECODE);
2660 
2661 	/*
2662 	 * Reset the data pointer to the beginning of the encoded data
2663 	 */
2664 	ddi_prop_reset_pos(ph);
2665 
2666 	/*
2667 	 * Figure out how much memory we need for the sum total
2668 	 */
2669 	nbytes = (cnt + 1) * sizeof (char *);
2670 
2671 	for (n = 0; n < cnt; n++) {
2672 		/*
2673 		 * Get the decoded size of the current encoded string.
2674 		 */
2675 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2676 		if (size < DDI_PROP_RESULT_OK) {
2677 			switch (size) {
2678 			case DDI_PROP_RESULT_EOF:
2679 				return (DDI_PROP_END_OF_DATA);
2680 
2681 			case DDI_PROP_RESULT_ERROR:
2682 				return (DDI_PROP_CANNOT_DECODE);
2683 			}
2684 		}
2685 
2686 		nbytes += size;
2687 	}
2688 
2689 	/*
2690 	 * Allocate memory in which to store the decoded strings.
2691 	 */
2692 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2693 
2694 	/*
2695 	 * Set up pointers for each string by figuring out yet
2696 	 * again how long each string is.
2697 	 */
2698 	ddi_prop_reset_pos(ph);
2699 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2700 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2701 		/*
2702 		 * Get the decoded size of the current encoded string.
2703 		 */
2704 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2705 		if (size < DDI_PROP_RESULT_OK) {
2706 			ddi_prop_free(strs);
2707 			switch (size) {
2708 			case DDI_PROP_RESULT_EOF:
2709 				return (DDI_PROP_END_OF_DATA);
2710 
2711 			case DDI_PROP_RESULT_ERROR:
2712 				return (DDI_PROP_CANNOT_DECODE);
2713 			}
2714 		}
2715 
2716 		*tmp = ptr;
2717 		ptr += size;
2718 	}
2719 
2720 	/*
2721 	 * String array is terminated by a NULL
2722 	 */
2723 	*tmp = NULL;
2724 
2725 	/*
2726 	 * Finally, we can decode each string
2727 	 */
2728 	ddi_prop_reset_pos(ph);
2729 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2730 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2731 		if (i < DDI_PROP_RESULT_OK) {
2732 			/*
2733 			 * Free the space we just allocated
2734 			 * and return an error
2735 			 */
2736 			ddi_prop_free(strs);
2737 			switch (i) {
2738 			case DDI_PROP_RESULT_EOF:
2739 				return (DDI_PROP_END_OF_DATA);
2740 
2741 			case DDI_PROP_RESULT_ERROR:
2742 				return (DDI_PROP_CANNOT_DECODE);
2743 			}
2744 		}
2745 	}
2746 
2747 	*(char ***)data = strs;
2748 	*nelements = cnt;
2749 
2750 	return (DDI_PROP_SUCCESS);
2751 }
2752 
2753 /*
2754  * Encode a string.
2755  */
2756 int
2757 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2758 {
2759 	char		**tmp;
2760 	int		size;
2761 	int		i;
2762 
2763 	/*
2764 	 * If there is no data, we cannot do anything
2765 	 */
2766 	if (nelements == 0)
2767 		return (DDI_PROP_CANNOT_ENCODE);
2768 
2769 	/*
2770 	 * Get the size of the encoded string.
2771 	 */
2772 	tmp = (char **)data;
2773 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2774 	if (size < DDI_PROP_RESULT_OK) {
2775 		switch (size) {
2776 		case DDI_PROP_RESULT_EOF:
2777 			return (DDI_PROP_END_OF_DATA);
2778 
2779 		case DDI_PROP_RESULT_ERROR:
2780 			return (DDI_PROP_CANNOT_ENCODE);
2781 		}
2782 	}
2783 
2784 	/*
2785 	 * Allocate space in the handle to store the encoded string.
2786 	 */
2787 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2788 		return (DDI_PROP_NO_MEMORY);
2789 
2790 	ddi_prop_reset_pos(ph);
2791 
2792 	/*
2793 	 * Encode the string.
2794 	 */
2795 	tmp = (char **)data;
2796 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2797 	if (i < DDI_PROP_RESULT_OK) {
2798 		switch (i) {
2799 		case DDI_PROP_RESULT_EOF:
2800 			return (DDI_PROP_END_OF_DATA);
2801 
2802 		case DDI_PROP_RESULT_ERROR:
2803 			return (DDI_PROP_CANNOT_ENCODE);
2804 		}
2805 	}
2806 
2807 	return (DDI_PROP_SUCCESS);
2808 }
2809 
2810 
2811 /*
2812  * Encode an array of strings.
2813  */
2814 int
2815 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2816 {
2817 	int		cnt = 0;
2818 	char		**tmp;
2819 	int		size;
2820 	uint_t		total_size;
2821 	int		i;
2822 
2823 	/*
2824 	 * If there is no data, we cannot do anything
2825 	 */
2826 	if (nelements == 0)
2827 		return (DDI_PROP_CANNOT_ENCODE);
2828 
2829 	/*
2830 	 * Get the total size required to encode all the strings.
2831 	 */
2832 	total_size = 0;
2833 	tmp = (char **)data;
2834 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2835 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2836 		if (size < DDI_PROP_RESULT_OK) {
2837 			switch (size) {
2838 			case DDI_PROP_RESULT_EOF:
2839 				return (DDI_PROP_END_OF_DATA);
2840 
2841 			case DDI_PROP_RESULT_ERROR:
2842 				return (DDI_PROP_CANNOT_ENCODE);
2843 			}
2844 		}
2845 		total_size += (uint_t)size;
2846 	}
2847 
2848 	/*
2849 	 * Allocate space in the handle to store the encoded strings.
2850 	 */
2851 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2852 		return (DDI_PROP_NO_MEMORY);
2853 
2854 	ddi_prop_reset_pos(ph);
2855 
2856 	/*
2857 	 * Encode the array of strings.
2858 	 */
2859 	tmp = (char **)data;
2860 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2861 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2862 		if (i < DDI_PROP_RESULT_OK) {
2863 			switch (i) {
2864 			case DDI_PROP_RESULT_EOF:
2865 				return (DDI_PROP_END_OF_DATA);
2866 
2867 			case DDI_PROP_RESULT_ERROR:
2868 				return (DDI_PROP_CANNOT_ENCODE);
2869 			}
2870 		}
2871 	}
2872 
2873 	return (DDI_PROP_SUCCESS);
2874 }
2875 
2876 
2877 /*
2878  * Decode an array of bytes.
2879  */
2880 static int
2881 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2882 {
2883 	uchar_t		*tmp;
2884 	int		nbytes;
2885 	int		i;
2886 
2887 	/*
2888 	 * If there are no elements return an error
2889 	 */
2890 	if (ph->ph_size == 0)
2891 		return (DDI_PROP_END_OF_DATA);
2892 
2893 	/*
2894 	 * Get the size of the encoded array of bytes.
2895 	 */
2896 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2897 		data, ph->ph_size);
2898 	if (nbytes < DDI_PROP_RESULT_OK) {
2899 		switch (nbytes) {
2900 		case DDI_PROP_RESULT_EOF:
2901 			return (DDI_PROP_END_OF_DATA);
2902 
2903 		case DDI_PROP_RESULT_ERROR:
2904 			return (DDI_PROP_CANNOT_DECODE);
2905 		}
2906 	}
2907 
2908 	/*
2909 	 * Allocated memory to store the decoded value in.
2910 	 */
2911 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2912 
2913 	/*
2914 	 * Decode each element and place it in the space we just allocated
2915 	 */
2916 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2917 	if (i < DDI_PROP_RESULT_OK) {
2918 		/*
2919 		 * Free the space we just allocated
2920 		 * and return an error
2921 		 */
2922 		ddi_prop_free(tmp);
2923 		switch (i) {
2924 		case DDI_PROP_RESULT_EOF:
2925 			return (DDI_PROP_END_OF_DATA);
2926 
2927 		case DDI_PROP_RESULT_ERROR:
2928 			return (DDI_PROP_CANNOT_DECODE);
2929 		}
2930 	}
2931 
2932 	*(uchar_t **)data = tmp;
2933 	*nelements = nbytes;
2934 
2935 	return (DDI_PROP_SUCCESS);
2936 }
2937 
2938 /*
2939  * Encode an array of bytes.
2940  */
2941 int
2942 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2943 {
2944 	int		size;
2945 	int		i;
2946 
2947 	/*
2948 	 * If there are no elements, then this is a boolean property,
2949 	 * so just create a property handle with no data and return.
2950 	 */
2951 	if (nelements == 0) {
2952 		(void) ddi_prop_encode_alloc(ph, 0);
2953 		return (DDI_PROP_SUCCESS);
2954 	}
2955 
2956 	/*
2957 	 * Get the size of the encoded array of bytes.
2958 	 */
2959 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2960 		nelements);
2961 	if (size < DDI_PROP_RESULT_OK) {
2962 		switch (size) {
2963 		case DDI_PROP_RESULT_EOF:
2964 			return (DDI_PROP_END_OF_DATA);
2965 
2966 		case DDI_PROP_RESULT_ERROR:
2967 			return (DDI_PROP_CANNOT_DECODE);
2968 		}
2969 	}
2970 
2971 	/*
2972 	 * Allocate space in the handle to store the encoded bytes.
2973 	 */
2974 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2975 		return (DDI_PROP_NO_MEMORY);
2976 
2977 	/*
2978 	 * Encode the array of bytes.
2979 	 */
2980 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2981 		nelements);
2982 	if (i < DDI_PROP_RESULT_OK) {
2983 		switch (i) {
2984 		case DDI_PROP_RESULT_EOF:
2985 			return (DDI_PROP_END_OF_DATA);
2986 
2987 		case DDI_PROP_RESULT_ERROR:
2988 			return (DDI_PROP_CANNOT_ENCODE);
2989 		}
2990 	}
2991 
2992 	return (DDI_PROP_SUCCESS);
2993 }
2994 
2995 /*
2996  * OBP 1275 integer, string and byte operators.
2997  *
2998  * DDI_PROP_CMD_DECODE:
2999  *
3000  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
3001  *	DDI_PROP_RESULT_EOF:		end of data
3002  *	DDI_PROP_OK:			data was decoded
3003  *
3004  * DDI_PROP_CMD_ENCODE:
3005  *
3006  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
3007  *	DDI_PROP_RESULT_EOF:		end of data
3008  *	DDI_PROP_OK:			data was encoded
3009  *
3010  * DDI_PROP_CMD_SKIP:
3011  *
3012  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
3013  *	DDI_PROP_RESULT_EOF:		end of data
3014  *	DDI_PROP_OK:			data was skipped
3015  *
3016  * DDI_PROP_CMD_GET_ESIZE:
3017  *
3018  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3019  *	DDI_PROP_RESULT_EOF:		end of data
3020  *	> 0:				the encoded size
3021  *
3022  * DDI_PROP_CMD_GET_DSIZE:
3023  *
3024  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3025  *	DDI_PROP_RESULT_EOF:		end of data
3026  *	> 0:				the decoded size
3027  */
3028 
3029 /*
3030  * OBP 1275 integer operator
3031  *
3032  * OBP properties are a byte stream of data, so integers may not be
3033  * properly aligned.  Therefore we need to copy them one byte at a time.
3034  */
3035 int
3036 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
3037 {
3038 	int	i;
3039 
3040 	switch (cmd) {
3041 	case DDI_PROP_CMD_DECODE:
3042 		/*
3043 		 * Check that there is encoded data
3044 		 */
3045 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3046 			return (DDI_PROP_RESULT_ERROR);
3047 		if (ph->ph_flags & PH_FROM_PROM) {
3048 			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
3049 			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3050 				ph->ph_size - i))
3051 				return (DDI_PROP_RESULT_ERROR);
3052 		} else {
3053 			if (ph->ph_size < sizeof (int) ||
3054 			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3055 				ph->ph_size - sizeof (int))))
3056 			return (DDI_PROP_RESULT_ERROR);
3057 		}
3058 
3059 		/*
3060 		 * Copy the integer, using the implementation-specific
3061 		 * copy function if the property is coming from the PROM.
3062 		 */
3063 		if (ph->ph_flags & PH_FROM_PROM) {
3064 			*data = impl_ddi_prop_int_from_prom(
3065 				(uchar_t *)ph->ph_cur_pos,
3066 				(ph->ph_size < PROP_1275_INT_SIZE) ?
3067 				ph->ph_size : PROP_1275_INT_SIZE);
3068 		} else {
3069 			bcopy(ph->ph_cur_pos, data, sizeof (int));
3070 		}
3071 
3072 		/*
3073 		 * Move the current location to the start of the next
3074 		 * bit of undecoded data.
3075 		 */
3076 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3077 			PROP_1275_INT_SIZE;
3078 		return (DDI_PROP_RESULT_OK);
3079 
3080 	case DDI_PROP_CMD_ENCODE:
3081 		/*
3082 		 * Check that there is room to encoded the data
3083 		 */
3084 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3085 			ph->ph_size < PROP_1275_INT_SIZE ||
3086 			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3087 				ph->ph_size - sizeof (int))))
3088 			return (DDI_PROP_RESULT_ERROR);
3089 
3090 		/*
3091 		 * Encode the integer into the byte stream one byte at a
3092 		 * time.
3093 		 */
3094 		bcopy(data, ph->ph_cur_pos, sizeof (int));
3095 
3096 		/*
3097 		 * Move the current location to the start of the next bit of
3098 		 * space where we can store encoded data.
3099 		 */
3100 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3101 		return (DDI_PROP_RESULT_OK);
3102 
3103 	case DDI_PROP_CMD_SKIP:
3104 		/*
3105 		 * Check that there is encoded data
3106 		 */
3107 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3108 				ph->ph_size < PROP_1275_INT_SIZE)
3109 			return (DDI_PROP_RESULT_ERROR);
3110 
3111 
3112 		if ((caddr_t)ph->ph_cur_pos ==
3113 				(caddr_t)ph->ph_data + ph->ph_size) {
3114 			return (DDI_PROP_RESULT_EOF);
3115 		} else if ((caddr_t)ph->ph_cur_pos >
3116 				(caddr_t)ph->ph_data + ph->ph_size) {
3117 			return (DDI_PROP_RESULT_EOF);
3118 		}
3119 
3120 		/*
3121 		 * Move the current location to the start of the next bit of
3122 		 * undecoded data.
3123 		 */
3124 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3125 		return (DDI_PROP_RESULT_OK);
3126 
3127 	case DDI_PROP_CMD_GET_ESIZE:
3128 		/*
3129 		 * Return the size of an encoded integer on OBP
3130 		 */
3131 		return (PROP_1275_INT_SIZE);
3132 
3133 	case DDI_PROP_CMD_GET_DSIZE:
3134 		/*
3135 		 * Return the size of a decoded integer on the system.
3136 		 */
3137 		return (sizeof (int));
3138 
3139 	default:
3140 #ifdef DEBUG
3141 		panic("ddi_prop_1275_int: %x impossible", cmd);
3142 		/*NOTREACHED*/
3143 #else
3144 		return (DDI_PROP_RESULT_ERROR);
3145 #endif	/* DEBUG */
3146 	}
3147 }
3148 
3149 /*
3150  * 64 bit integer operator.
3151  *
3152  * This is an extension, defined by Sun, to the 1275 integer
3153  * operator.  This routine handles the encoding/decoding of
3154  * 64 bit integer properties.
3155  */
3156 int
3157 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
3158 {
3159 
3160 	switch (cmd) {
3161 	case DDI_PROP_CMD_DECODE:
3162 		/*
3163 		 * Check that there is encoded data
3164 		 */
3165 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3166 			return (DDI_PROP_RESULT_ERROR);
3167 		if (ph->ph_flags & PH_FROM_PROM) {
3168 			return (DDI_PROP_RESULT_ERROR);
3169 		} else {
3170 			if (ph->ph_size < sizeof (int64_t) ||
3171 			    ((int64_t *)ph->ph_cur_pos >
3172 			    ((int64_t *)ph->ph_data +
3173 			    ph->ph_size - sizeof (int64_t))))
3174 				return (DDI_PROP_RESULT_ERROR);
3175 		}
3176 		/*
3177 		 * Copy the integer, using the implementation-specific
3178 		 * copy function if the property is coming from the PROM.
3179 		 */
3180 		if (ph->ph_flags & PH_FROM_PROM) {
3181 			return (DDI_PROP_RESULT_ERROR);
3182 		} else {
3183 			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
3184 		}
3185 
3186 		/*
3187 		 * Move the current location to the start of the next
3188 		 * bit of undecoded data.
3189 		 */
3190 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3191 		    sizeof (int64_t);
3192 			return (DDI_PROP_RESULT_OK);
3193 
3194 	case DDI_PROP_CMD_ENCODE:
3195 		/*
3196 		 * Check that there is room to encoded the data
3197 		 */
3198 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3199 		    ph->ph_size < sizeof (int64_t) ||
3200 		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
3201 		    ph->ph_size - sizeof (int64_t))))
3202 			return (DDI_PROP_RESULT_ERROR);
3203 
3204 		/*
3205 		 * Encode the integer into the byte stream one byte at a
3206 		 * time.
3207 		 */
3208 		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
3209 
3210 		/*
3211 		 * Move the current location to the start of the next bit of
3212 		 * space where we can store encoded data.
3213 		 */
3214 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3215 		    sizeof (int64_t);
3216 		return (DDI_PROP_RESULT_OK);
3217 
3218 	case DDI_PROP_CMD_SKIP:
3219 		/*
3220 		 * Check that there is encoded data
3221 		 */
3222 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3223 		    ph->ph_size < sizeof (int64_t))
3224 			return (DDI_PROP_RESULT_ERROR);
3225 
3226 		if ((caddr_t)ph->ph_cur_pos ==
3227 		    (caddr_t)ph->ph_data + ph->ph_size) {
3228 			return (DDI_PROP_RESULT_EOF);
3229 		} else if ((caddr_t)ph->ph_cur_pos >
3230 		    (caddr_t)ph->ph_data + ph->ph_size) {
3231 			return (DDI_PROP_RESULT_EOF);
3232 		}
3233 
3234 		/*
3235 		 * Move the current location to the start of
3236 		 * the next bit of undecoded data.
3237 		 */
3238 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3239 		    sizeof (int64_t);
3240 			return (DDI_PROP_RESULT_OK);
3241 
3242 	case DDI_PROP_CMD_GET_ESIZE:
3243 		/*
3244 		 * Return the size of an encoded integer on OBP
3245 		 */
3246 		return (sizeof (int64_t));
3247 
3248 	case DDI_PROP_CMD_GET_DSIZE:
3249 		/*
3250 		 * Return the size of a decoded integer on the system.
3251 		 */
3252 		return (sizeof (int64_t));
3253 
3254 	default:
3255 #ifdef DEBUG
3256 		panic("ddi_prop_int64_op: %x impossible", cmd);
3257 		/*NOTREACHED*/
3258 #else
3259 		return (DDI_PROP_RESULT_ERROR);
3260 #endif  /* DEBUG */
3261 	}
3262 }
3263 
3264 /*
3265  * OBP 1275 string operator.
3266  *
3267  * OBP strings are NULL terminated.
3268  */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;
	char	*p;
	char	*end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Fail if the string (including its NUL terminator) would
		 * run past the end of the encoded buffer.
		 * NOTE(review): strlen() runs before the bounds check, so
		 * this assumes a terminator exists within the property
		 * data -- confirm callers guarantee that.
		 */
		n = strlen((char *)ph->ph_cur_pos) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(ph->ph_cur_pos, data, n);

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/* Reject a string (plus NUL) that overflows the buffer. */
		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Return the string length plus one for the NULL
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;

		if (p == end) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Scan for the terminating NUL; error out if the buffer
		 * ends before one is found (malformed encoding).
		 */
		for (n = 0; p < end; n++) {
			if (*p++ == 0) {
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		return (DDI_PROP_RESULT_ERROR);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 *
		 * Note: unlike GET_ESIZE, this also advances ph_cur_pos
		 * past the string it sized.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		for (n = 0; p < end; n++) {
			if (*p++ == 0) {
				ph->ph_cur_pos = p;
				return (n+1);
			}
		}
		return (DDI_PROP_RESULT_ERROR);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3391 
3392 /*
3393  * OBP 1275 byte operator
3394  *
3395  * Caller must specify the number of bytes to get.  OBP encodes bytes
3396  * as a byte so there is a 1-to-1 translation.
3397  */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
	uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data and that the requested
		 * byte count fits between the current position and the end
		 * of the encoded buffer.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
			ph->ph_size < nelements ||
			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
			ph->ph_size < nelements ||
			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data.  A property smaller
		 * than the skip count is an error ...
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
				ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		/* ... while running off the end is reported as EOF. */
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3486 
3487 /*
3488  * Used for properties that come from the OBP, hardware configuration files,
3489  * or that are created by calls to ddi_prop_update(9F).
3490  */
3491 static struct prop_handle_ops prop_1275_ops = {
3492 	ddi_prop_1275_int,
3493 	ddi_prop_1275_string,
3494 	ddi_prop_1275_bytes,
3495 	ddi_prop_int64_op
3496 };
3497 
3498 
3499 /*
3500  * Interface to create/modify a managed property on child's behalf...
3501  * Flags interpreted are:
3502  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3503  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3504  *
3505  * Use same dev_t when modifying or undefining a property.
3506  * Search for properties with DDI_DEV_T_ANY to match first named
3507  * property on the list.
3508  *
3509  * Properties are stored LIFO and subsequently will match the first
3510  * `matching' instance.
3511  */
3512 
3513 /*
3514  * ddi_prop_add:	Add a software defined property
3515  */
3516 
3517 /*
3518  * define to get a new ddi_prop_t.
3519  * km_flags are KM_SLEEP or KM_NOSLEEP.
3520  */
3521 
3522 #define	DDI_NEW_PROP_T(km_flags)	\
3523 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3524 
3525 static int
3526 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3527     char *name, caddr_t value, int length)
3528 {
3529 	ddi_prop_t	*new_propp, *propp;
3530 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3531 	int		km_flags = KM_NOSLEEP;
3532 	int		name_buf_len;
3533 
3534 	/*
3535 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3536 	 */
3537 
3538 	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3539 		return (DDI_PROP_INVAL_ARG);
3540 
3541 	if (flags & DDI_PROP_CANSLEEP)
3542 		km_flags = KM_SLEEP;
3543 
3544 	if (flags & DDI_PROP_SYSTEM_DEF)
3545 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3546 	else if (flags & DDI_PROP_HW_DEF)
3547 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3548 
3549 	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3550 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3551 		return (DDI_PROP_NO_MEMORY);
3552 	}
3553 
3554 	/*
3555 	 * If dev is major number 0, then we need to do a ddi_name_to_major
3556 	 * to get the real major number for the device.  This needs to be
3557 	 * done because some drivers need to call ddi_prop_create in their
3558 	 * attach routines but they don't have a dev.  By creating the dev
3559 	 * ourself if the major number is 0, drivers will not have to know what
3560 	 * their major number.	They can just create a dev with major number
3561 	 * 0 and pass it in.  For device 0, we will be doing a little extra
3562 	 * work by recreating the same dev that we already have, but its the
3563 	 * price you pay :-).
3564 	 *
3565 	 * This fixes bug #1098060.
3566 	 */
3567 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3568 		new_propp->prop_dev =
3569 		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3570 		    getminor(dev));
3571 	} else
3572 		new_propp->prop_dev = dev;
3573 
3574 	/*
3575 	 * Allocate space for property name and copy it in...
3576 	 */
3577 
3578 	name_buf_len = strlen(name) + 1;
3579 	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3580 	if (new_propp->prop_name == 0)	{
3581 		kmem_free(new_propp, sizeof (ddi_prop_t));
3582 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3583 		return (DDI_PROP_NO_MEMORY);
3584 	}
3585 	bcopy(name, new_propp->prop_name, name_buf_len);
3586 
3587 	/*
3588 	 * Set the property type
3589 	 */
3590 	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3591 
3592 	/*
3593 	 * Set length and value ONLY if not an explicit property undefine:
3594 	 * NOTE: value and length are zero for explicit undefines.
3595 	 */
3596 
3597 	if (flags & DDI_PROP_UNDEF_IT) {
3598 		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3599 	} else {
3600 		if ((new_propp->prop_len = length) != 0) {
3601 			new_propp->prop_val = kmem_alloc(length, km_flags);
3602 			if (new_propp->prop_val == 0)  {
3603 				kmem_free(new_propp->prop_name, name_buf_len);
3604 				kmem_free(new_propp, sizeof (ddi_prop_t));
3605 				cmn_err(CE_CONT, prop_no_mem_msg, name);
3606 				return (DDI_PROP_NO_MEMORY);
3607 			}
3608 			bcopy(value, new_propp->prop_val, length);
3609 		}
3610 	}
3611 
3612 	/*
3613 	 * Link property into beginning of list. (Properties are LIFO order.)
3614 	 */
3615 
3616 	mutex_enter(&(DEVI(dip)->devi_lock));
3617 	propp = *list_head;
3618 	new_propp->prop_next = propp;
3619 	*list_head = new_propp;
3620 	mutex_exit(&(DEVI(dip)->devi_lock));
3621 	return (DDI_PROP_SUCCESS);
3622 }
3623 
3624 
3625 /*
3626  * ddi_prop_change:	Modify a software managed property value
3627  *
3628  *			Set new length and value if found.
3629  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3630  *			input name is the NULL string.
3631  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3632  *
3633  *			Note: an undef can be modified to be a define,
3634  *			(you can't go the other way.)
3635  */
3636 
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (Done before taking devi_lock so the allocation may sleep.)
	 */
	if (length != 0)  {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL)	{
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Property found: copy the new value into the
		 * preallocated buffer, release the old value, and
		 * install the new one.  Any earlier explicit undefine
		 * is converted back into a define.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: discard the preallocated buffer and create anew. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3706 
3707 /*
 * Common update routine used to update and encode a property.	Creates
 * a property handle, calls the property encode routine, and updates the
 * property if it already exists; otherwise the property is created.
 */
3712  */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.	So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.  On failure, free any
	 * buffer the encode routine may have allocated.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change made their own copy.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3792 
3793 
3794 /*
3795  * ddi_prop_create:	Define a managed property:
3796  *			See above for details.
3797  */
3798 
3799 int
3800 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3801     char *name, caddr_t value, int length)
3802 {
3803 	if (!(flag & DDI_PROP_CANSLEEP)) {
3804 		flag |= DDI_PROP_DONTSLEEP;
3805 #ifdef DDI_PROP_DEBUG
3806 		if (length != 0)
3807 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3808 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3809 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3810 #endif /* DDI_PROP_DEBUG */
3811 	}
3812 	flag &= ~DDI_PROP_SYSTEM_DEF;
3813 	return (ddi_prop_update_common(dev, dip,
3814 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY), name,
3815 	    value, length, ddi_prop_fm_encode_bytes));
3816 }
3817 
3818 int
3819 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3820     char *name, caddr_t value, int length)
3821 {
3822 	if (!(flag & DDI_PROP_CANSLEEP))
3823 		flag |= DDI_PROP_DONTSLEEP;
3824 	return (ddi_prop_update_common(dev, dip,
3825 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
3826 	    DDI_PROP_TYPE_ANY),
3827 	    name, value, length, ddi_prop_fm_encode_bytes));
3828 }
3829 
3830 int
3831 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3832     char *name, caddr_t value, int length)
3833 {
3834 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3835 
3836 	/*
3837 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3838 	 * return error.
3839 	 */
3840 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3841 		return (DDI_PROP_INVAL_ARG);
3842 
3843 	if (!(flag & DDI_PROP_CANSLEEP))
3844 		flag |= DDI_PROP_DONTSLEEP;
3845 	flag &= ~DDI_PROP_SYSTEM_DEF;
3846 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3847 		return (DDI_PROP_NOT_FOUND);
3848 
3849 	return (ddi_prop_update_common(dev, dip,
3850 	    (flag | DDI_PROP_TYPE_BYTE), name,
3851 	    value, length, ddi_prop_fm_encode_bytes));
3852 }
3853 
3854 int
3855 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3856     char *name, caddr_t value, int length)
3857 {
3858 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3859 
3860 	/*
3861 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3862 	 * return error.
3863 	 */
3864 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3865 		return (DDI_PROP_INVAL_ARG);
3866 
3867 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3868 		return (DDI_PROP_NOT_FOUND);
3869 
3870 	if (!(flag & DDI_PROP_CANSLEEP))
3871 		flag |= DDI_PROP_DONTSLEEP;
3872 	return (ddi_prop_update_common(dev, dip,
3873 		(flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3874 		name, value, length, ddi_prop_fm_encode_bytes));
3875 }
3876 
3877 
3878 /*
3879  * Common lookup routine used to lookup and decode a property.
3880  * Creates a property handle, searches for the raw encoded data,
3881  * fills in the handle, and calls the property decode functions
3882  * passed in.
3883  *
3884  * This routine is not static because ddi_bus_prop_op() which lives in
3885  * ddi_impl.c calls it.  No driver should be calling this routine.
3886  */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	/* Note: DDI_DEV_T_NONE is rejected here (not DDI_DEV_T_ANY). */
	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
		flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if (flags & DDI_UNBND_DLPI2) {
		/*
		 * For unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	/* Decode into the caller's buffer/pointer via prop_decoder. */
	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data (the search allocated it for us).
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3949 
3950 /*
3951  * Lookup and return an array of composite properties.  The driver must
3952  * provide the decode routine.
3953  */
3954 int
3955 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3956     uint_t flags, char *name, void *data, uint_t *nelements,
3957     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3958 {
3959 	return (ddi_prop_lookup_common(match_dev, dip,
3960 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3961 	    data, nelements, prop_decoder));
3962 }
3963 
3964 /*
3965  * Return 1 if a property exists (no type checking done).
3966  * Return 0 if it does not exist.
3967  */
3968 int
3969 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3970 {
3971 	int	i;
3972 	uint_t	x = 0;
3973 
3974 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3975 		flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3976 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3977 }
3978 
3979 
3980 /*
3981  * Update an array of composite properties.  The driver must
3982  * provide the encode routine.
3983  */
3984 int
3985 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3986     char *name, void *data, uint_t nelements,
3987     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3988 {
3989 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3990 	    name, data, nelements, prop_create));
3991 }
3992 
3993 /*
3994  * Get a single integer or boolean property and return it.
3995  * If the property does not exists, or cannot be decoded,
3996  * then return the defvalue passed in.
3997  *
3998  * This routine always succeeds.
3999  */
4000 int
4001 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4002     char *name, int defvalue)
4003 {
4004 	int	data;
4005 	uint_t	nelements;
4006 	int	rval;
4007 
4008 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4009 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4010 #ifdef DEBUG
4011 		if (dip != NULL) {
4012 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4013 			    " 0x%x (prop = %s, node = %s%d)", flags,
4014 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4015 		}
4016 #endif /* DEBUG */
4017 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4018 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4019 	}
4020 
4021 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4022 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4023 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4024 		if (rval == DDI_PROP_END_OF_DATA)
4025 			data = 1;
4026 		else
4027 			data = defvalue;
4028 	}
4029 	return (data);
4030 }
4031 
4032 /*
4033  * Get a single 64 bit integer or boolean property and return it.
4034  * If the property does not exists, or cannot be decoded,
4035  * then return the defvalue passed in.
4036  *
4037  * This routine always succeeds.
4038  */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t	data;
	uint_t	nelements;
	int	rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): despite the "always succeeds" contract in
		 * the header comment, invalid flags return
		 * DDI_PROP_INVAL_ARG as an int64_t here, whereas
		 * ddi_prop_get_int() masks the bad bits and continues --
		 * confirm this asymmetry is intentional.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	/* DDI_PROP_NOTPROM is forced: 64-bit lookups skip the PROM. */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
4070 
4071 /*
4072  * Get an array of integer property
4073  */
4074 int
4075 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4076     char *name, int **data, uint_t *nelements)
4077 {
4078 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4079 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4080 #ifdef DEBUG
4081 		if (dip != NULL) {
4082 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4083 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4084 			    flags, name, ddi_driver_name(dip),
4085 			    ddi_get_instance(dip));
4086 		}
4087 #endif /* DEBUG */
4088 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4089 		LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4090 	}
4091 
4092 	return (ddi_prop_lookup_common(match_dev, dip,
4093 	    (flags | DDI_PROP_TYPE_INT), name, data,
4094 	    nelements, ddi_prop_fm_decode_ints));
4095 }
4096 
4097 /*
4098  * Get an array of 64 bit integer properties
4099  */
4100 int
4101 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4102     char *name, int64_t **data, uint_t *nelements)
4103 {
4104 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4105 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4106 #ifdef DEBUG
4107 		if (dip != NULL) {
4108 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4109 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4110 			    flags, name, ddi_driver_name(dip),
4111 			    ddi_get_instance(dip));
4112 		}
4113 #endif /* DEBUG */
4114 		return (DDI_PROP_INVAL_ARG);
4115 	}
4116 
4117 	return (ddi_prop_lookup_common(match_dev, dip,
4118 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4119 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4120 }
4121 
4122 /*
4123  * Update a single integer property.  If the property exists on the drivers
4124  * property list it updates, else it creates it.
4125  */
4126 int
4127 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4128     char *name, int data)
4129 {
4130 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4131 	    name, &data, 1, ddi_prop_fm_encode_ints));
4132 }
4133 
4134 /*
4135  * Update a single 64 bit integer property.
4136  * Update the driver property list if it exists, else create it.
4137  */
4138 int
4139 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4140     char *name, int64_t data)
4141 {
4142 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4143 	    name, &data, 1, ddi_prop_fm_encode_int64));
4144 }
4145 
4146 int
4147 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4148     char *name, int data)
4149 {
4150 	return (ddi_prop_update_common(match_dev, dip,
4151 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4152 	    name, &data, 1, ddi_prop_fm_encode_ints));
4153 }
4154 
4155 int
4156 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4157     char *name, int64_t data)
4158 {
4159 	return (ddi_prop_update_common(match_dev, dip,
4160 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4161 	    name, &data, 1, ddi_prop_fm_encode_int64));
4162 }
4163 
4164 /*
4165  * Update an array of integer property.  If the property exists on the drivers
4166  * property list it updates, else it creates it.
4167  */
4168 int
4169 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4170     char *name, int *data, uint_t nelements)
4171 {
4172 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4173 	    name, data, nelements, ddi_prop_fm_encode_ints));
4174 }
4175 
4176 /*
4177  * Update an array of 64 bit integer properties.
4178  * Update the driver property list if it exists, else create it.
4179  */
4180 int
4181 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4182     char *name, int64_t *data, uint_t nelements)
4183 {
4184 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4185 	    name, data, nelements, ddi_prop_fm_encode_int64));
4186 }
4187 
4188 int
4189 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4190     char *name, int64_t *data, uint_t nelements)
4191 {
4192 	return (ddi_prop_update_common(match_dev, dip,
4193 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4194 	    name, data, nelements, ddi_prop_fm_encode_int64));
4195 }
4196 
4197 int
4198 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4199     char *name, int *data, uint_t nelements)
4200 {
4201 	return (ddi_prop_update_common(match_dev, dip,
4202 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4203 	    name, data, nelements, ddi_prop_fm_encode_ints));
4204 }
4205 
4206 /*
4207  * Get a single string property.
4208  */
4209 int
4210 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4211     char *name, char **data)
4212 {
4213 	uint_t x;
4214 
4215 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4216 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4217 #ifdef DEBUG
4218 		if (dip != NULL) {
4219 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4220 			    "(prop = %s, node = %s%d); invalid bits ignored",
4221 			    "ddi_prop_lookup_string", flags, name,
4222 			    ddi_driver_name(dip), ddi_get_instance(dip));
4223 		}
4224 #endif /* DEBUG */
4225 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4226 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4227 	}
4228 
4229 	return (ddi_prop_lookup_common(match_dev, dip,
4230 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4231 	    &x, ddi_prop_fm_decode_string));
4232 }
4233 
4234 /*
4235  * Get an array of strings property.
4236  */
4237 int
4238 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4239     char *name, char ***data, uint_t *nelements)
4240 {
4241 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4242 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4243 #ifdef DEBUG
4244 		if (dip != NULL) {
4245 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4246 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4247 			    flags, name, ddi_driver_name(dip),
4248 			    ddi_get_instance(dip));
4249 		}
4250 #endif /* DEBUG */
4251 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4252 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4253 	}
4254 
4255 	return (ddi_prop_lookup_common(match_dev, dip,
4256 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4257 	    nelements, ddi_prop_fm_decode_strings));
4258 }
4259 
4260 /*
4261  * Update a single string property.
4262  */
4263 int
4264 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4265     char *name, char *data)
4266 {
4267 	return (ddi_prop_update_common(match_dev, dip,
4268 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4269 	    ddi_prop_fm_encode_string));
4270 }
4271 
4272 int
4273 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4274     char *name, char *data)
4275 {
4276 	return (ddi_prop_update_common(match_dev, dip,
4277 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4278 	    name, &data, 1, ddi_prop_fm_encode_string));
4279 }
4280 
4281 
4282 /*
4283  * Update an array of strings property.
4284  */
4285 int
4286 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4287     char *name, char **data, uint_t nelements)
4288 {
4289 	return (ddi_prop_update_common(match_dev, dip,
4290 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4291 	    ddi_prop_fm_encode_strings));
4292 }
4293 
4294 int
4295 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4296     char *name, char **data, uint_t nelements)
4297 {
4298 	return (ddi_prop_update_common(match_dev, dip,
4299 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4300 	    name, data, nelements,
4301 	    ddi_prop_fm_encode_strings));
4302 }
4303 
4304 
4305 /*
4306  * Get an array of bytes property.
4307  */
4308 int
4309 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4310     char *name, uchar_t **data, uint_t *nelements)
4311 {
4312 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4313 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4314 #ifdef DEBUG
4315 		if (dip != NULL) {
4316 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4317 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4318 			    flags, name, ddi_driver_name(dip),
4319 			    ddi_get_instance(dip));
4320 		}
4321 #endif /* DEBUG */
4322 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4323 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4324 	}
4325 
4326 	return (ddi_prop_lookup_common(match_dev, dip,
4327 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4328 	    nelements, ddi_prop_fm_decode_bytes));
4329 }
4330 
4331 /*
4332  * Update an array of bytes property.
4333  */
4334 int
4335 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4336     char *name, uchar_t *data, uint_t nelements)
4337 {
4338 	if (nelements == 0)
4339 		return (DDI_PROP_INVAL_ARG);
4340 
4341 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4342 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4343 }
4344 
4345 
/*
 * System-defined variant of ddi_prop_update_byte_array().
 */
int
e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}
4357 
4358 
4359 /*
4360  * ddi_prop_remove_common:	Undefine a managed property:
4361  *			Input dev_t must match dev_t when defined.
4362  *			Returns DDI_PROP_NOT_FOUND, possibly.
4363  *			DDI_PROP_INVAL_ARG is also possible if dev is
4364  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4365  */
4366 int
4367 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4368 {
4369 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4370 	ddi_prop_t	*propp;
4371 	ddi_prop_t	*lastpropp = NULL;
4372 
4373 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4374 	    (strlen(name) == 0)) {
4375 		return (DDI_PROP_INVAL_ARG);
4376 	}
4377 
4378 	if (flag & DDI_PROP_SYSTEM_DEF)
4379 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4380 	else if (flag & DDI_PROP_HW_DEF)
4381 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4382 
4383 	mutex_enter(&(DEVI(dip)->devi_lock));
4384 
4385 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4386 		if (DDI_STRSAME(propp->prop_name, name) &&
4387 		    (dev == propp->prop_dev)) {
4388 			/*
4389 			 * Unlink this propp allowing for it to
4390 			 * be first in the list:
4391 			 */
4392 
4393 			if (lastpropp == NULL)
4394 				*list_head = propp->prop_next;
4395 			else
4396 				lastpropp->prop_next = propp->prop_next;
4397 
4398 			mutex_exit(&(DEVI(dip)->devi_lock));
4399 
4400 			/*
4401 			 * Free memory and return...
4402 			 */
4403 			kmem_free(propp->prop_name,
4404 			    strlen(propp->prop_name) + 1);
4405 			if (propp->prop_len != 0)
4406 				kmem_free(propp->prop_val, propp->prop_len);
4407 			kmem_free(propp, sizeof (ddi_prop_t));
4408 			return (DDI_PROP_SUCCESS);
4409 		}
4410 		lastpropp = propp;
4411 	}
4412 	mutex_exit(&(DEVI(dip)->devi_lock));
4413 	return (DDI_PROP_NOT_FOUND);
4414 }
4415 
/*
 * ddi_prop_remove: remove a driver-defined property (see
 * ddi_prop_remove_common() for error returns).
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}
4421 
/*
 * e_ddi_prop_remove: remove a system-defined property.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4427 
4428 /*
4429  * e_ddi_prop_list_delete: remove a list of properties
4430  *	Note that the caller needs to provide the required protection
4431  *	(eg. devi_lock if these properties are still attached to a devi)
4432  */
4433 void
4434 e_ddi_prop_list_delete(ddi_prop_t *props)
4435 {
4436 	i_ddi_prop_list_delete(props);
4437 }
4438 
4439 /*
4440  * ddi_prop_remove_all_common:
4441  *	Used before unloading a driver to remove
4442  *	all properties. (undefines all dev_t's props.)
4443  *	Also removes `explicitly undefined' props.
4444  *	No errors possible.
4445  */
4446 void
4447 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4448 {
4449 	ddi_prop_t	**list_head;
4450 
4451 	mutex_enter(&(DEVI(dip)->devi_lock));
4452 	if (flag & DDI_PROP_SYSTEM_DEF) {
4453 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4454 	} else if (flag & DDI_PROP_HW_DEF) {
4455 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4456 	} else {
4457 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4458 	}
4459 	i_ddi_prop_list_delete(*list_head);
4460 	*list_head = NULL;
4461 	mutex_exit(&(DEVI(dip)->devi_lock));
4462 }
4463 
4464 
4465 /*
4466  * ddi_prop_remove_all:		Remove all driver prop definitions.
4467  */
4468 
4469 void
4470 ddi_prop_remove_all(dev_info_t *dip)
4471 {
4472 	ddi_prop_remove_all_common(dip, 0);
4473 }
4474 
4475 /*
4476  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4477  */
4478 
4479 void
4480 e_ddi_prop_remove_all(dev_info_t *dip)
4481 {
4482 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4483 }
4484 
4485 
4486 /*
4487  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4488  *			searches which match this property return
4489  *			the error code DDI_PROP_UNDEFINED.
4490  *
4491  *			Use ddi_prop_remove to negate effect of
4492  *			ddi_prop_undefine
4493  *
4494  *			See above for error returns.
4495  */
4496 
4497 int
4498 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4499 {
4500 	if (!(flag & DDI_PROP_CANSLEEP))
4501 		flag |= DDI_PROP_DONTSLEEP;
4502 	return (ddi_prop_update_common(dev, dip,
4503 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT |
4504 	    DDI_PROP_TYPE_ANY), name, NULL, 0, ddi_prop_fm_encode_bytes));
4505 }
4506 
/*
 * System-defined variant of ddi_prop_undefine().
 */
int
e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY),
	    name, NULL, 0, ddi_prop_fm_encode_bytes));
}
4517 
4518 /*
4519  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4520  *
4521  * if input dip != child_dip, then call is on behalf of child
4522  * to search PROM, do it via ddi_prop_search_common() and ascend only
4523  * if allowed.
4524  *
4525  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4526  * to search for PROM defined props only.
4527  *
4528  * Note that the PROM search is done only if the requested dev
4529  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4530  * have no associated dev, thus are automatically associated with
4531  * DDI_DEV_T_NONE.
4532  *
4533  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4534  *
4535  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4536  * that the property resides in the prom.
4537  */
4538 int
4539 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4540     ddi_prop_op_t prop_op, int mod_flags,
4541     char *name, caddr_t valuep, int *lengthp)
4542 {
4543 	int	len;
4544 	caddr_t buffer;
4545 
4546 	/*
4547 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4548 	 * look in caller's PROM if it's a self identifying device...
4549 	 *
4550 	 * Note that this is very similar to ddi_prop_op, but we
4551 	 * search the PROM instead of the s/w defined properties,
4552 	 * and we are called on by the parent driver to do this for
4553 	 * the child.
4554 	 */
4555 
4556 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4557 	    ndi_dev_is_prom_node(ch_dip) &&
4558 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4559 		len = prom_getproplen((dnode_t)DEVI(ch_dip)->devi_nodeid, name);
4560 		if (len == -1) {
4561 			return (DDI_PROP_NOT_FOUND);
4562 		}
4563 
4564 		/*
4565 		 * If exists only request, we're done
4566 		 */
4567 		if (prop_op == PROP_EXISTS) {
4568 			return (DDI_PROP_FOUND_1275);
4569 		}
4570 
4571 		/*
4572 		 * If length only request or prop length == 0, get out
4573 		 */
4574 		if ((prop_op == PROP_LEN) || (len == 0)) {
4575 			*lengthp = len;
4576 			return (DDI_PROP_FOUND_1275);
4577 		}
4578 
4579 		/*
4580 		 * Allocate buffer if required... (either way `buffer'
4581 		 * is receiving address).
4582 		 */
4583 
4584 		switch (prop_op) {
4585 
4586 		case PROP_LEN_AND_VAL_ALLOC:
4587 
4588 			buffer = kmem_alloc((size_t)len,
4589 			    mod_flags & DDI_PROP_CANSLEEP ?
4590 			    KM_SLEEP : KM_NOSLEEP);
4591 			if (buffer == NULL) {
4592 				return (DDI_PROP_NO_MEMORY);
4593 			}
4594 			*(caddr_t *)valuep = buffer;
4595 			break;
4596 
4597 		case PROP_LEN_AND_VAL_BUF:
4598 
4599 			if (len > (*lengthp)) {
4600 				*lengthp = len;
4601 				return (DDI_PROP_BUF_TOO_SMALL);
4602 			}
4603 
4604 			buffer = valuep;
4605 			break;
4606 
4607 		default:
4608 			break;
4609 		}
4610 
4611 		/*
4612 		 * Call the PROM function to do the copy.
4613 		 */
4614 		(void) prom_getprop((dnode_t)DEVI(ch_dip)->devi_nodeid,
4615 			name, buffer);
4616 
4617 		*lengthp = len; /* return the actual length to the caller */
4618 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4619 		return (DDI_PROP_FOUND_1275);
4620 	}
4621 
4622 	return (DDI_PROP_NOT_FOUND);
4623 }
4624 
4625 /*
4626  * The ddi_bus_prop_op default bus nexus prop op function.
4627  *
4628  * Code to search hardware layer (PROM), if it exists,
4629  * on behalf of child, then, if appropriate, ascend and check
4630  * my own software defined properties...
4631  */
4632 int
4633 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4634     ddi_prop_op_t prop_op, int mod_flags,
4635     char *name, caddr_t valuep, int *lengthp)
4636 {
4637 	int	error;
4638 
4639 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4640 				    name, valuep, lengthp);
4641 
4642 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4643 	    error == DDI_PROP_BUF_TOO_SMALL)
4644 		return (error);
4645 
4646 	if (error == DDI_PROP_NO_MEMORY) {
4647 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4648 		return (DDI_PROP_NO_MEMORY);
4649 	}
4650 
4651 	/*
4652 	 * Check the 'options' node as a last resort
4653 	 */
4654 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4655 		return (DDI_PROP_NOT_FOUND);
4656 
4657 	if (ch_dip == ddi_root_node())	{
4658 		/*
4659 		 * As a last resort, when we've reached
4660 		 * the top and still haven't found the
4661 		 * property, see if the desired property
4662 		 * is attached to the options node.
4663 		 *
4664 		 * The options dip is attached right after boot.
4665 		 */
4666 		ASSERT(options_dip != NULL);
4667 		/*
4668 		 * Force the "don't pass" flag to *just* see
4669 		 * what the options node has to offer.
4670 		 */
4671 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4672 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4673 		    (uint_t *)lengthp));
4674 	}
4675 
4676 	/*
4677 	 * Otherwise, continue search with parent's s/w defined properties...
4678 	 * NOTE: Using `dip' in following call increments the level.
4679 	 */
4680 
4681 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4682 	    name, valuep, (uint_t *)lengthp));
4683 }
4684 
4685 /*
4686  * External property functions used by other parts of the kernel...
4687  */
4688 
4689 /*
4690  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4691  */
4692 
4693 int
4694 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4695     caddr_t valuep, int *lengthp)
4696 {
4697 	_NOTE(ARGUNUSED(type))
4698 	dev_info_t *devi;
4699 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4700 	int error;
4701 
4702 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4703 		return (DDI_PROP_NOT_FOUND);
4704 
4705 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4706 	ddi_release_devi(devi);
4707 	return (error);
4708 }
4709 
4710 /*
4711  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4712  */
4713 
4714 int
4715 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4716     caddr_t valuep, int *lengthp)
4717 {
4718 	_NOTE(ARGUNUSED(type))
4719 	dev_info_t *devi;
4720 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4721 	int error;
4722 
4723 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4724 		return (DDI_PROP_NOT_FOUND);
4725 
4726 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4727 	ddi_release_devi(devi);
4728 	return (error);
4729 }
4730 
4731 /*
4732  * e_ddi_getprop:	See comments for ddi_getprop.
4733  */
4734 int
4735 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4736 {
4737 	_NOTE(ARGUNUSED(type))
4738 	dev_info_t *devi;
4739 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4740 	int	propvalue = defvalue;
4741 	int	proplength = sizeof (int);
4742 	int	error;
4743 
4744 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4745 		return (defvalue);
4746 
4747 	error = cdev_prop_op(dev, devi, prop_op,
4748 	    flags, name, (caddr_t)&propvalue, &proplength);
4749 	ddi_release_devi(devi);
4750 
4751 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4752 		propvalue = 1;
4753 
4754 	return (propvalue);
4755 }
4756 
4757 /*
4758  * e_ddi_getprop_int64:
4759  *
4760  * This is a typed interfaces, but predates typed properties. With the
4761  * introduction of typed properties the framework tries to ensure
4762  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4763  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4764  * typed interface invokes legacy (non-typed) interfaces:
4765  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4766  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4767  * this type of lookup as a single operation we invoke the legacy
4768  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4769  * framework ddi_prop_op(9F) implementation is expected to check for
4770  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4771  * (currently TYPE_INT64).
4772  */
4773 int64_t
4774 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4775     int flags, int64_t defvalue)
4776 {
4777 	_NOTE(ARGUNUSED(type))
4778 	dev_info_t	*devi;
4779 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4780 	int64_t		propvalue = defvalue;
4781 	int		proplength = sizeof (propvalue);
4782 	int		error;
4783 
4784 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4785 		return (defvalue);
4786 
4787 	error = cdev_prop_op(dev, devi, prop_op, flags |
4788 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4789 	ddi_release_devi(devi);
4790 
4791 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4792 		propvalue = 1;
4793 
4794 	return (propvalue);
4795 }
4796 
4797 /*
4798  * e_ddi_getproplen:	See comments for ddi_getproplen.
4799  */
4800 int
4801 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4802 {
4803 	_NOTE(ARGUNUSED(type))
4804 	dev_info_t *devi;
4805 	ddi_prop_op_t prop_op = PROP_LEN;
4806 	int error;
4807 
4808 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4809 		return (DDI_PROP_NOT_FOUND);
4810 
4811 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4812 	ddi_release_devi(devi);
4813 	return (error);
4814 }
4815 
4816 /*
4817  * Routines to get at elements of the dev_info structure
4818  */
4819 
4820 /*
4821  * ddi_binding_name: Return the driver binding name of the devinfo node
4822  *		This is the name the OS used to bind the node to a driver.
4823  */
4824 char *
4825 ddi_binding_name(dev_info_t *dip)
4826 {
4827 	return (DEVI(dip)->devi_binding_name);
4828 }
4829 
4830 /*
4831  * ddi_driver_major: Return the major number of the driver that
4832  *		the supplied devinfo is bound to (-1 if none)
4833  */
4834 major_t
4835 ddi_driver_major(dev_info_t *devi)
4836 {
4837 	return (DEVI(devi)->devi_major);
4838 }
4839 
4840 /*
4841  * ddi_driver_name: Return the normalized driver name. this is the
4842  *		actual driver name
4843  */
4844 const char *
4845 ddi_driver_name(dev_info_t *devi)
4846 {
4847 	major_t major;
4848 
4849 	if ((major = ddi_driver_major(devi)) != (major_t)-1)
4850 		return (ddi_major_to_name(major));
4851 
4852 	return (ddi_node_name(devi));
4853 }
4854 
4855 /*
4856  * i_ddi_set_binding_name:	Set binding name.
4857  *
4858  *	Set the binding name to the given name.
4859  *	This routine is for use by the ddi implementation, not by drivers.
4860  */
4861 void
4862 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4863 {
4864 	DEVI(dip)->devi_binding_name = name;
4865 
4866 }
4867 
4868 /*
4869  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4870  * the implementation has used to bind the node to a driver.
4871  */
4872 char *
4873 ddi_get_name(dev_info_t *dip)
4874 {
4875 	return (DEVI(dip)->devi_binding_name);
4876 }
4877 
4878 /*
4879  * ddi_node_name: Return the name property of the devinfo node
4880  *		This may differ from ddi_binding_name if the node name
4881  *		does not define a binding to a driver (i.e. generic names).
4882  */
4883 char *
4884 ddi_node_name(dev_info_t *dip)
4885 {
4886 	return (DEVI(dip)->devi_node_name);
4887 }
4888 
4889 
4890 /*
4891  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4892  */
4893 int
4894 ddi_get_nodeid(dev_info_t *dip)
4895 {
4896 	return (DEVI(dip)->devi_nodeid);
4897 }
4898 
/*
 * ddi_get_instance: Return the instance number of the devinfo node.
 */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}
4904 
/*
 * ddi_get_driver: Return the dev_ops of the driver bound to this node.
 */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}
4910 
/*
 * ddi_set_driver: Set the dev_ops of the driver bound to this node.
 */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}
4916 
4917 /*
4918  * ddi_set_driver_private/ddi_get_driver_private:
4919  * Get/set device driver private data in devinfo.
4920  */
4921 void
4922 ddi_set_driver_private(dev_info_t *dip, void *data)
4923 {
4924 	DEVI(dip)->devi_driver_data = data;
4925 }
4926 
/* Return the driver-private data previously stored by the setter above. */
void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}
4932 
4933 /*
4934  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4935  */
4936 
/* Return the parent devinfo node (NULL for the root). */
dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}
4942 
/* Return the first child devinfo node, if any. */
dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}
4948 
/* Return the next sibling devinfo node, if any. */
dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}
4954 
/* Return the devi_next link of this node. */
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}
4960 
/* Set the devi_next link of this node. */
void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}
4966 
4967 /*
4968  * ddi_root_node:		Return root node of devinfo tree
4969  */
4970 
4971 dev_info_t *
4972 ddi_root_node(void)
4973 {
4974 	extern dev_info_t *top_devinfo;
4975 
4976 	return (top_devinfo);
4977 }
4978 
4979 /*
4980  * Miscellaneous functions:
4981  */
4982 
4983 /*
4984  * Implementation specific hooks
4985  */
4986 
/*
 * ddi_report_dev: Issue the REPORTDEV ctlop for the node and, when the
 * node is user-accessible (has cb_ops), log its full pathname.
 */
void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}
5007 
5008 /*
5009  * ddi_ctlops() is described in the assembler not to buy a new register
5010  * window when it's called and can reduce cost in climbing the device tree
5011  * without using the tail call optimization.
5012  */
5013 int
5014 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5015 {
5016 	int ret;
5017 
5018 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5019 	    (void *)&rnumber, (void *)result);
5020 
5021 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5022 }
5023 
/* Ask the parent how many register sets this node has (NREGS ctlop). */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}
5029 
/* Ask the parent whether this is a self-identifying device (SIDDEV ctlop). */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}
5035 
/* Ask the parent whether the device is in a slave-only slot. */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}
5041 
/* Ask a's parent whether devices a and b have hardware affinity. */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}
5047 
5048 int
5049 ddi_streams_driver(dev_info_t *dip)
5050 {
5051 	if ((i_ddi_node_state(dip) >= DS_ATTACHED) &&
5052 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5053 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5054 		return (DDI_SUCCESS);
5055 	return (DDI_FAILURE);
5056 }
5057 
5058 /*
5059  * callback free list
5060  */
5061 
5062 static int ncallbacks;
5063 static int nc_low = 170;
5064 static int nc_med = 512;
5065 static int nc_high = 2048;
5066 static struct ddi_callback *callbackq;
5067 static struct ddi_callback *callbackqfree;
5068 
5069 /*
5070  * set/run callback lists
5071  */
5072 struct	cbstats	{
5073 	kstat_named_t	cb_asked;
5074 	kstat_named_t	cb_new;
5075 	kstat_named_t	cb_run;
5076 	kstat_named_t	cb_delete;
5077 	kstat_named_t	cb_maxreq;
5078 	kstat_named_t	cb_maxlist;
5079 	kstat_named_t	cb_alloc;
5080 	kstat_named_t	cb_runouts;
5081 	kstat_named_t	cb_L2;
5082 	kstat_named_t	cb_grow;
5083 } cbstats = {
5084 	{"asked",	KSTAT_DATA_UINT32},
5085 	{"new",		KSTAT_DATA_UINT32},
5086 	{"run",		KSTAT_DATA_UINT32},
5087 	{"delete",	KSTAT_DATA_UINT32},
5088 	{"maxreq",	KSTAT_DATA_UINT32},
5089 	{"maxlist",	KSTAT_DATA_UINT32},
5090 	{"alloc",	KSTAT_DATA_UINT32},
5091 	{"runouts",	KSTAT_DATA_UINT32},
5092 	{"L2",		KSTAT_DATA_UINT32},
5093 	{"grow",	KSTAT_DATA_UINT32},
5094 };
5095 
/* Shorthand accessors for the 32-bit values of the kstats above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, the L2 free list, and cbstats. */
static kmutex_t ddi_callback_mutex;
5108 
5109 /*
5110  * callbacks are handled using a L1/L2 cache. The L1 cache
5111  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5112  * we can't get callbacks from the L1 cache [because pageout is doing
5113  * I/O at the time freemem is 0], we allocate callbacks out of the
5114  * L2 cache. The L2 cache is static and depends on the memory size.
5115  * [We might also count the number of devices at probe time and
5116  * allocate one structure per device and adjust for deferred attach]
5117  */
5118 void
5119 impl_ddi_callback_init(void)
5120 {
5121 	int	i;
5122 	uint_t	physmegs;
5123 	kstat_t	*ksp;
5124 
5125 	physmegs = physmem >> (20 - PAGESHIFT);
5126 	if (physmegs < 48) {
5127 		ncallbacks = nc_low;
5128 	} else if (physmegs < 128) {
5129 		ncallbacks = nc_med;
5130 	} else {
5131 		ncallbacks = nc_high;
5132 	}
5133 
5134 	/*
5135 	 * init free list
5136 	 */
5137 	callbackq = kmem_zalloc(
5138 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5139 	for (i = 0; i < ncallbacks-1; i++)
5140 		callbackq[i].c_nfree = &callbackq[i+1];
5141 	callbackqfree = callbackq;
5142 
5143 	/* init kstats */
5144 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5145 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5146 		ksp->ks_data = (void *) &cbstats;
5147 		kstat_install(ksp);
5148 	}
5149 
5150 }
5151 
/*
 * Insert (funcp, arg) into the callback list headed at *listid, merging
 * with an existing entry (by bumping its count) when one matches.
 * Allocation falls back from kmem to the static L2 pool, and finally to
 * kmem_alloc_tryhard with KM_PANIC.  Caller holds ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Coalesce with an existing entry for the same (func, arg) pair. */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			/* Both L1 and L2 exhausted: force an allocation. */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append to list tail (marker), or start a new list. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5195 
5196 void
5197 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5198 {
5199 	mutex_enter(&ddi_callback_mutex);
5200 	cbstats.nc_asked++;
5201 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5202 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5203 	(void) callback_insert(funcp, arg, listid, 1);
5204 	mutex_exit(&ddi_callback_mutex);
5205 }
5206 
/*
 * Softcall worker for ddi_run_callback(): drain the callback list headed
 * at *Queue.  Each entry is unlinked (returned to the L2 pool or freed),
 * then its function is invoked up to c_count times; a zero return from
 * the function re-queues the remaining count via callback_insert().
 * `pending' is the total outstanding count snapshotted on first pass.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* First pass only: total up outstanding callback counts. */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* Return pool entries to the L2 free list, else kmem_free. */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* Callback not ready: re-queue remainder. */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
					count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5267 
/* Schedule the callback list at *listid to be drained via softcall. */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5273 
/* Stub getinfo-style helper: always "no devinfo". */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}
5280 
5281 /*
5282  * A driver should support its own getinfo(9E) entry point. This function
5283  * is provided as a convenience for ON drivers that don't expect their
5284  * getinfo(9E) entry point to be called. A driver that uses this must not
5285  * call ddi_create_minor_node.
5286  */
5287 int
5288 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5289 {
5290 	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
5291 	return (DDI_FAILURE);
5292 }
5293 
5294 /*
5295  * A driver should support its own getinfo(9E) entry point. This function
5296  * is provided as a convenience for ON drivers that where the minor number
5297  * is the instance. Drivers that do not have 1:1 mapping must implement
5298  * their own getinfo(9E) function.
5299  */
5300 int
5301 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5302     void *arg, void **result)
5303 {
5304 	_NOTE(ARGUNUSED(dip))
5305 	int	instance;
5306 
5307 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5308 		return (DDI_FAILURE);
5309 
5310 	instance = getminor((dev_t)(uintptr_t)arg);
5311 	*result = (void *)(uintptr_t)instance;
5312 	return (DDI_SUCCESS);
5313 }
5314 
/* Stub attach/detach-style entry point: always fails. */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5321 
/* Stub bus_dma_map(9E): mapping never possible. */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
5329 
/* Stub bus_dma_allochdl(9E): attributes never acceptable. */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
5337 
/* Stub bus_dma_freehdl(9E): always fails. */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5345 
/* Stub bus_dma_bindhdl(9E): mapping never possible. */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
5354 
/* Stub bus_dma_unbindhdl(9E): always fails. */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5362 
/* Stub bus_dma_flush(9E): always fails. */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
5371 
/* Stub bus_dma_win(9E): always fails. */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
5380 
/* Stub bus_dma_ctl(9E): always fails. */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5389 
/* Do-nothing entry point placeholder. */
void
ddivoid(void)
{}
5393 
/* Stub chpoll(9E): device does not support polling. */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5401 
/* Return the credentials of the current thread. */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5407 
/* Return lbolt, the number of clock ticks since boot. */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}
5413 
/*
 * Return the current time in seconds, falling back to reading the
 * time-of-day hardware (under tod_lock) when gethrestime_sec() yields 0.
 */
time_t
ddi_get_time(void)
{
	time_t	now;

	if ((now = gethrestime_sec()) == 0) {
		timestruc_t ts;
		mutex_enter(&tod_lock);
		ts = tod_get();
		mutex_exit(&tod_lock);
		return (ts.tv_sec);
	} else {
		return (now);
	}
}
5429 
/* Return the pid of the process owning the current thread. */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
5435 
/* Return the unique thread id of the current thread. */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5441 
5442 /*
5443  * This function returns B_TRUE if the caller can reasonably expect that a call
5444  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5445  * by user-level signal.  If it returns B_FALSE, then the caller should use
5446  * other means to make certain that the wait will not hang "forever."
5447  *
5448  * It does not check the signal mask, nor for reception of any particular
5449  * signal.
5450  *
5451  * Currently, a thread can receive a signal if it's not a kernel thread and it
5452  * is not in the middle of exit(2) tear-down.  Threads that are in that
5453  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5454  * cv_timedwait, and qwait_sig to qwait.
5455  */
5456 boolean_t
5457 ddi_can_receive_sig(void)
5458 {
5459 	proc_t *pp;
5460 
5461 	if (curthread->t_proc_flag & TP_LWPEXIT)
5462 		return (B_FALSE);
5463 	if ((pp = ttoproc(curthread)) == NULL)
5464 		return (B_FALSE);
5465 	return (pp->p_as != &kas);
5466 }
5467 
5468 /*
5469  * Swap bytes in 16-bit [half-]words
5470  */
5471 void
5472 swab(void *src, void *dst, size_t nbytes)
5473 {
5474 	uchar_t *pf = (uchar_t *)src;
5475 	uchar_t *pt = (uchar_t *)dst;
5476 	uchar_t tmp;
5477 	int nshorts;
5478 
5479 	nshorts = nbytes >> 1;
5480 
5481 	while (--nshorts >= 0) {
5482 		tmp = *pf++;
5483 		*pt++ = *pf++;
5484 		*pt++ = tmp;
5485 	}
5486 }
5487 
5488 static void
5489 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5490 {
5491 	struct ddi_minor_data *dp;
5492 
5493 	mutex_enter(&(DEVI(ddip)->devi_lock));
5494 	i_devi_enter(ddip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);
5495 
5496 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5497 		DEVI(ddip)->devi_minor = dmdp;
5498 	} else {
5499 		while (dp->next != (struct ddi_minor_data *)NULL)
5500 			dp = dp->next;
5501 		dp->next = dmdp;
5502 	}
5503 
5504 	i_devi_exit(ddip, DEVI_S_MD_UPDATE, 1);
5505 	mutex_exit(&(DEVI(ddip)->devi_lock));
5506 }
5507 
5508 /*
5509  * Part of the obsolete SunCluster DDI Hooks.
5510  * Keep for binary compatibility
5511  */
5512 minor_t
5513 ddi_getiminor(dev_t dev)
5514 {
5515 	return (getminor(dev));
5516 }
5517 
/*
 * Post an ESC_DEVFS_MINOR_CREATE sysevent announcing the creation of a
 * minor node under 'dip', so userland (syseventd/devfsadm) can bring
 * /devices and /dev up to date.  minor_name may be NULL.
 *
 * Always returns DDI_SUCCESS: when the event cannot be built or
 * delivered, a warning asking the administrator to run devfsadm is
 * logged instead of propagating an error.
 */
static int
i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
{
	int se_flag;
	int kmem_flag;
	int se_err;
	char *pathname;
	sysevent_t *ev = NULL;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/* determine interrupt context: cannot sleep at interrupt level */
	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;

	i_ddi_di_cache_invalidate(kmem_flag);

#ifdef DEBUG
	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
		    "interrupt level by driver %s",
		    ddi_driver_name(dip));
	}
#endif /* DEBUG */

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
	if (ev == NULL) {
		goto fail;
	}

	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
	if (pathname == NULL) {
		sysevent_free(ev);
		goto fail;
	}

	/* attach the node's /devices path as the DEVFS_PATHNAME attribute */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, se_flag) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		goto fail;
	}
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, se_flag) != 0) {
			sysevent_free_attr(ev_attr_list);
			sysevent_free(ev);
			goto fail;
		}
	}

	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
		sysevent_free(ev);
		goto fail;
	}

	/*
	 * SE_NO_TRANSPORT means syseventd is not responding; warn but do
	 * not treat it as a hard failure.
	 */
	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
		if (se_err == SE_NO_TRANSPORT) {
			cmn_err(CE_WARN, "/devices or /dev may not be current "
			    "for driver %s (%s). Run devfsadm -i %s",
			    ddi_driver_name(dip), "syseventd not responding",
			    ddi_driver_name(dip));
		} else {
			sysevent_free(ev);
			goto fail;
		}
	}

	sysevent_free(ev);
	return (DDI_SUCCESS);
fail:
	cmn_err(CE_WARN, "/devices or /dev may not be current "
	    "for driver %s. Run devfsadm -i %s",
	    ddi_driver_name(dip), ddi_driver_name(dip));
	return (DDI_SUCCESS);
}
5606 
5607 /*
5608  * failing to remove a minor node is not of interest
5609  * therefore we do not generate an error message
5610  */
5611 static int
5612 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5613 {
5614 	char *pathname;
5615 	sysevent_t *ev;
5616 	sysevent_id_t eid;
5617 	sysevent_value_t se_val;
5618 	sysevent_attr_list_t *ev_attr_list = NULL;
5619 
5620 	/*
5621 	 * only log ddi_remove_minor_node() calls outside the scope
5622 	 * of attach/detach reconfigurations and when the dip is
5623 	 * still initialized.
5624 	 */
5625 	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5626 	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5627 		return (DDI_SUCCESS);
5628 	}
5629 
5630 	i_ddi_di_cache_invalidate(KM_SLEEP);
5631 
5632 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5633 	if (ev == NULL) {
5634 		return (DDI_SUCCESS);
5635 	}
5636 
5637 	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5638 	if (pathname == NULL) {
5639 		sysevent_free(ev);
5640 		return (DDI_SUCCESS);
5641 	}
5642 
5643 	(void) ddi_pathname(dip, pathname);
5644 	ASSERT(strlen(pathname));
5645 	se_val.value_type = SE_DATA_TYPE_STRING;
5646 	se_val.value.sv_string = pathname;
5647 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5648 	    &se_val, SE_SLEEP) != 0) {
5649 		kmem_free(pathname, MAXPATHLEN);
5650 		sysevent_free(ev);
5651 		return (DDI_SUCCESS);
5652 	}
5653 
5654 	kmem_free(pathname, MAXPATHLEN);
5655 
5656 	/*
5657 	 * allow for NULL minor names
5658 	 */
5659 	if (minor_name != NULL) {
5660 		se_val.value.sv_string = minor_name;
5661 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5662 		    &se_val, SE_SLEEP) != 0) {
5663 			sysevent_free_attr(ev_attr_list);
5664 			goto fail;
5665 		}
5666 	}
5667 
5668 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5669 		sysevent_free_attr(ev_attr_list);
5670 	} else {
5671 		(void) log_sysevent(ev, SE_SLEEP, &eid);
5672 	}
5673 fail:
5674 	sysevent_free(ev);
5675 	return (DDI_SUCCESS);
5676 }
5677 
5678 /*
5679  * Derive the device class of the node.
5680  * Device class names aren't defined yet. Until this is done we use
5681  * devfs event subclass names as device class names.
5682  */
5683 static int
5684 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5685 {
5686 	int rv = DDI_SUCCESS;
5687 
5688 	if (i_ddi_devi_class(dip) == NULL) {
5689 		if (strncmp(node_type, DDI_NT_BLOCK,
5690 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5691 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5692 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5693 		    strcmp(node_type, DDI_NT_FD) != 0) {
5694 
5695 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5696 
5697 		} else if (strncmp(node_type, DDI_NT_NET,
5698 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5699 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5700 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5701 
5702 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5703 		}
5704 	}
5705 
5706 	return (rv);
5707 }
5708 
5709 /*
5710  * Check compliance with PSARC 2003/375:
5711  *
5712  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5713  * exceed IFNAMSIZ (16) characters in length.
5714  */
5715 static boolean_t
5716 verify_name(char *name)
5717 {
5718 	size_t	len = strlen(name);
5719 	char	*cp;
5720 
5721 	if (len == 0 || len > IFNAMSIZ)
5722 		return (B_FALSE);
5723 
5724 	for (cp = name; *cp != '\0'; cp++) {
5725 		if (!isalnum(*cp) && *cp != '_')
5726 			return (B_FALSE);
5727 	}
5728 
5729 	return (B_TRUE);
5730 }
5731 
5732 /*
5733  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5734  *				attach it to the given devinfo node.
5735  */
5736 
5737 int
5738 ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
5739     minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
5740     const char *read_priv, const char *write_priv, mode_t priv_mode)
5741 {
5742 	struct ddi_minor_data *dmdp;
5743 	major_t major;
5744 
5745 	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
5746 		return (DDI_FAILURE);
5747 
5748 	if (name == NULL)
5749 		return (DDI_FAILURE);
5750 
5751 	/*
5752 	 * Log a message if the minor number the driver is creating
5753 	 * is not expressible on the on-disk filesystem (currently
5754 	 * this is limited to 18 bits both by UFS). The device can
5755 	 * be opened via devfs, but not by device special files created
5756 	 * via mknod().
5757 	 */
5758 	if (minor_num > L_MAXMIN32) {
5759 		cmn_err(CE_WARN,
5760 		    "%s%d:%s minor 0x%x too big for 32-bit applications",
5761 		    ddi_driver_name(dip), ddi_get_instance(dip),
5762 		    name, minor_num);
5763 		return (DDI_FAILURE);
5764 	}
5765 
5766 	/* dip must be bound and attached */
5767 	major = ddi_driver_major(dip);
5768 	ASSERT(major != (major_t)-1);
5769 
5770 	/*
5771 	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5772 	 */
5773 	if (node_type == NULL) {
5774 		node_type = DDI_PSEUDO;
5775 		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
5776 		    " minor node %s; default to DDI_PSEUDO",
5777 		    ddi_driver_name(dip), ddi_get_instance(dip), name));
5778 	}
5779 
5780 	/*
5781 	 * If the driver is a network driver, ensure that the name falls within
5782 	 * the interface naming constraints specified by PSARC/2003/375.
5783 	 */
5784 	if (strcmp(node_type, DDI_NT_NET) == 0) {
5785 		if (!verify_name(name))
5786 			return (DDI_FAILURE);
5787 
5788 		if (mtype == DDM_MINOR) {
5789 			struct devnames *dnp = &devnamesp[major];
5790 
5791 			/* Mark driver as a network driver */
5792 			LOCK_DEV_OPS(&dnp->dn_lock);
5793 			dnp->dn_flags |= DN_NETWORK_DRIVER;
5794 			UNLOCK_DEV_OPS(&dnp->dn_lock);
5795 		}
5796 	}
5797 
5798 	if (mtype == DDM_MINOR) {
5799 		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
5800 		    DDI_SUCCESS)
5801 			return (DDI_FAILURE);
5802 	}
5803 
5804 	/*
5805 	 * Take care of minor number information for the node.
5806 	 */
5807 
5808 	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
5809 	    KM_NOSLEEP)) == NULL) {
5810 		return (DDI_FAILURE);
5811 	}
5812 	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
5813 		kmem_free(dmdp, sizeof (struct ddi_minor_data));
5814 		return (DDI_FAILURE);
5815 	}
5816 	dmdp->dip = dip;
5817 	dmdp->ddm_dev = makedevice(major, minor_num);
5818 	dmdp->ddm_spec_type = spec_type;
5819 	dmdp->ddm_node_type = node_type;
5820 	dmdp->type = mtype;
5821 	if (flag & CLONE_DEV) {
5822 		dmdp->type = DDM_ALIAS;
5823 		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
5824 	}
5825 	if (flag & PRIVONLY_DEV) {
5826 		dmdp->ddm_flags |= DM_NO_FSPERM;
5827 	}
5828 	if (read_priv || write_priv) {
5829 		dmdp->ddm_node_priv =
5830 		    devpolicy_priv_by_name(read_priv, write_priv);
5831 	}
5832 	dmdp->ddm_priv_mode = priv_mode;
5833 
5834 	ddi_append_minor_node(dip, dmdp);
5835 
5836 	/*
5837 	 * only log ddi_create_minor_node() calls which occur
5838 	 * outside the scope of attach(9e)/detach(9e) reconfigurations
5839 	 */
5840 	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip))) {
5841 		(void) i_log_devfs_minor_create(dip, name);
5842 	}
5843 
5844 	/*
5845 	 * Check if any dacf rules match the creation of this minor node
5846 	 */
5847 	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
5848 	return (DDI_SUCCESS);
5849 }
5850 
/*
 * Create an ordinary (DDM_MINOR) minor node with no device-policy
 * privileges; see ddi_create_minor_common() for details.
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
5858 
/*
 * Create a DDM_MINOR minor node guarded by the named read/write
 * privileges and privileged access mode.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5867 
/*
 * Create a minor node of type DDM_DEFAULT.
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5875 
5876 /*
5877  * Internal (non-ddi) routine for drivers to export names known
5878  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5879  * but not exported externally to /dev
5880  */
5881 int
5882 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5883     minor_t minor_num)
5884 {
5885 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5886 	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5887 }
5888 
/*
 * Remove the minor node named 'name' from 'dip', or all of the node's
 * minor nodes when name is NULL.  Each removal logs a sysevent (via
 * i_log_devfs_minor_remove) and releases the minor's name string,
 * device policy and dacf client data.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	mutex_enter(&(DEVI(dip)->devi_lock));
	i_devi_enter(dip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);

	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save the successor before we potentially free dmdp */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				(void) i_log_devfs_minor_remove(dip,
				    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}

	i_devi_exit(dip, DEVI_S_MD_UPDATE, 1);
	mutex_exit(&(DEVI(dip)->devi_lock));
}
5935 
5936 
/*
 * Return non-zero if the system is currently panicking.
 */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}
5942 
5943 
5944 /*
5945  * Find first bit set in a mask (returned counting from 1 up)
5946  */
5947 
5948 int
5949 ddi_ffs(long mask)
5950 {
5951 	extern int ffs(long mask);
5952 	return (ffs(mask));
5953 }
5954 
5955 /*
5956  * Find last bit set. Take mask and clear
5957  * all but the most significant bit, and
5958  * then let ffs do the rest of the work.
5959  *
5960  * Algorithm courtesy of Steve Chessin.
5961  */
5962 
5963 int
5964 ddi_fls(long mask)
5965 {
5966 	extern int ffs(long);
5967 
5968 	while (mask) {
5969 		long nx;
5970 
5971 		if ((nx = (mask & (mask - 1))) == 0)
5972 			break;
5973 		mask = nx;
5974 	}
5975 	return (ffs(mask));
5976 }
5977 
5978 /*
5979  * The next five routines comprise generic storage management utilities
5980  * for driver soft state structures (in "the old days," this was done
5981  * with a statically sized array - big systems and dynamic loading
5982  * and unloading make heap allocation more attractive)
5983  */
5984 
5985 /*
5986  * Allocate a set of pointers to 'n_items' objects of size 'size'
5987  * bytes.  Each pointer is initialized to nil.
5988  *
5989  * The 'size' and 'n_items' values are stashed in the opaque
5990  * handle returned to the caller.
5991  *
5992  * This implementation interprets 'set of pointers' to mean 'array
5993  * of pointers' but note that nothing in the interface definition
5994  * precludes an implementation that uses, for example, a linked list.
5995  * However there should be a small efficiency gain from using an array
5996  * at lookup time.
5997  *
5998  * NOTE	As an optimization, we make our growable array allocations in
5999  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6000  *	gives us anyway.  It should save us some free/realloc's ..
6001  *
6002  *	As a further optimization, we make the growable array start out
6003  *	with MIN_N_ITEMS in it.
6004  */
6005 
6006 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6007 
/*
 * Initialize a soft state set: allocate the handle and an initial
 * zeroed pointer array of at least MIN_N_ITEMS entries, rounded up to
 * a power of two.  Returns 0 on success, EINVAL on bad arguments.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	struct i_ddi_soft_state *ss;

	if (state_p == NULL || *state_p != NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to the next power of two: when fls ==
		 * ffs exactly one bit is set, i.e. it already is one.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;

	return (0);
}
6038 
6039 
6040 /*
6041  * Allocate a state structure of size 'size' to be associated
6042  * with item 'item'.
6043  *
6044  * In this implementation, the array is extended to
6045  * allow the requested offset, if needed.
6046  */
6047 int
6048 ddi_soft_state_zalloc(void *state, int item)
6049 {
6050 	struct i_ddi_soft_state *ss;
6051 	void **array;
6052 	void *new_element;
6053 
6054 	if ((ss = state) == NULL || item < 0)
6055 		return (DDI_FAILURE);
6056 
6057 	mutex_enter(&ss->lock);
6058 	if (ss->size == 0) {
6059 		mutex_exit(&ss->lock);
6060 		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
6061 		    mod_containing_pc(caller()));
6062 		return (DDI_FAILURE);
6063 	}
6064 
6065 	array = ss->array;	/* NULL if ss->n_items == 0 */
6066 	ASSERT(ss->n_items != 0 && array != NULL);
6067 
6068 	/*
6069 	 * refuse to tread on an existing element
6070 	 */
6071 	if (item < ss->n_items && array[item] != NULL) {
6072 		mutex_exit(&ss->lock);
6073 		return (DDI_FAILURE);
6074 	}
6075 
6076 	/*
6077 	 * Allocate a new element to plug in
6078 	 */
6079 	new_element = kmem_zalloc(ss->size, KM_SLEEP);
6080 
6081 	/*
6082 	 * Check if the array is big enough, if not, grow it.
6083 	 */
6084 	if (item >= ss->n_items) {
6085 		void	**new_array;
6086 		size_t	new_n_items;
6087 		struct i_ddi_soft_state *dirty;
6088 
6089 		/*
6090 		 * Allocate a new array of the right length, copy
6091 		 * all the old pointers to the new array, then
6092 		 * if it exists at all, put the old array on the
6093 		 * dirty list.
6094 		 *
6095 		 * Note that we can't kmem_free() the old array.
6096 		 *
6097 		 * Why -- well the 'get' operation is 'mutex-free', so we
6098 		 * can't easily catch a suspended thread that is just about
6099 		 * to dereference the array we just grew out of.  So we
6100 		 * cons up a header and put it on a list of 'dirty'
6101 		 * pointer arrays.  (Dirty in the sense that there may
6102 		 * be suspended threads somewhere that are in the middle
6103 		 * of referencing them).  Fortunately, we -can- garbage
6104 		 * collect it all at ddi_soft_state_fini time.
6105 		 */
6106 		new_n_items = ss->n_items;
6107 		while (new_n_items < (1 + item))
6108 			new_n_items <<= 1;	/* double array size .. */
6109 
6110 		ASSERT(new_n_items >= (1 + item));	/* sanity check! */
6111 
6112 		new_array = kmem_zalloc(new_n_items * sizeof (void *),
6113 		    KM_SLEEP);
6114 		/*
6115 		 * Copy the pointers into the new array
6116 		 */
6117 		bcopy(array, new_array, ss->n_items * sizeof (void *));
6118 
6119 		/*
6120 		 * Save the old array on the dirty list
6121 		 */
6122 		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
6123 		dirty->array = ss->array;
6124 		dirty->n_items = ss->n_items;
6125 		dirty->next = ss->next;
6126 		ss->next = dirty;
6127 
6128 		ss->array = (array = new_array);
6129 		ss->n_items = new_n_items;
6130 	}
6131 
6132 	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
6133 
6134 	array[item] = new_element;
6135 
6136 	mutex_exit(&ss->lock);
6137 	return (DDI_SUCCESS);
6138 }
6139 
6140 
6141 /*
6142  * Fetch a pointer to the allocated soft state structure.
6143  *
6144  * This is designed to be cheap.
6145  *
6146  * There's an argument that there should be more checking for
6147  * nil pointers and out of bounds on the array.. but we do a lot
6148  * of that in the alloc/free routines.
6149  *
6150  * An array has the convenience that we don't need to lock read-access
6151  * to it c.f. a linked list.  However our "expanding array" strategy
6152  * means that we should hold a readers lock on the i_ddi_soft_state
6153  * structure.
6154  *
6155  * However, from a performance viewpoint, we need to do it without
6156  * any locks at all -- this also makes it a leaf routine.  The algorithm
6157  * is 'lock-free' because we only discard the pointer arrays at
6158  * ddi_soft_state_fini() time.
6159  */
6160 void *
6161 ddi_get_soft_state(void *state, int item)
6162 {
6163 	struct i_ddi_soft_state *ss = state;
6164 
6165 	ASSERT(ss != NULL && item >= 0);
6166 
6167 	if (item < ss->n_items && ss->array != NULL)
6168 		return (ss->array[item]);
6169 	return (NULL);
6170 }
6171 
6172 /*
6173  * Free the state structure corresponding to 'item.'   Freeing an
6174  * element that has either gone or was never allocated is not
6175  * considered an error.  Note that we free the state structure, but
6176  * we don't shrink our pointer array, or discard 'dirty' arrays,
6177  * since even a few pointers don't really waste too much memory.
6178  *
6179  * Passing an item number that is out of bounds, or a null pointer will
6180  * provoke an error message.
6181  */
void
ddi_soft_state_free(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if ((ss = state) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* unhook the element under the lock; free it afterwards */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/* perform the actual free outside the mutex */
	if (element)
		kmem_free(element, ss->size);
}
6216 
6217 
6218 /*
6219  * Free the entire set of pointers, and any
6220  * soft state structures contained therein.
6221  *
6222  * Note that we don't grab the ss->lock mutex, even though
6223  * we're inspecting the various fields of the data structure.
6224  *
6225  * There is an implicit assumption that this routine will
6226  * never run concurrently with any of the above on this
6227  * particular state structure i.e. by the time the driver
6228  * calls this routine, there should be no other threads
6229  * running in the driver.
6230  */
void
ddi_soft_state_fini(void **state_p)
{
	struct i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL || (ss = *state_p) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the current pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
6270 
6271 /*
6272  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6273  * Storage is double buffered to prevent updates during devi_addr use -
6274  * double buffering is adaquate for reliable ddi_deviname() consumption.
6275  * The double buffer is not freed until dev_info structure destruction
6276  * (by i_ddi_free_node).
6277  */
6278 void
6279 ddi_set_name_addr(dev_info_t *dip, char *name)
6280 {
6281 	char	*buf = DEVI(dip)->devi_addr_buf;
6282 	char	*newaddr;
6283 
6284 	if (buf == NULL) {
6285 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6286 		DEVI(dip)->devi_addr_buf = buf;
6287 	}
6288 
6289 	if (name) {
6290 		ASSERT(strlen(name) < MAXNAMELEN);
6291 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6292 		    (buf + MAXNAMELEN) : buf;
6293 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6294 	} else
6295 		newaddr = NULL;
6296 
6297 	DEVI(dip)->devi_addr = newaddr;
6298 }
6299 
/*
 * Return the unit-address string last set via ddi_set_name_addr(),
 * or NULL if none is set.
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6305 
/*
 * Attach parent-private data to the devinfo node.
 */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6311 
/*
 * Return the parent-private data attached by ddi_set_parent_data().
 */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6317 
6318 /*
6319  * ddi_name_to_major: Returns the major number of a module given its name.
6320  */
6321 major_t
6322 ddi_name_to_major(char *name)
6323 {
6324 	return (mod_name_to_major(name));
6325 }
6326 
6327 /*
6328  * ddi_major_to_name: Returns the module name bound to a major number.
6329  */
6330 char *
6331 ddi_major_to_name(major_t major)
6332 {
6333 	return (mod_major_to_name(major));
6334 }
6335 
6336 /*
6337  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6338  * pointed at by 'name.'  A devinfo node is named as a result of calling
6339  * ddi_initchild().
6340  *
6341  * Note: the driver must be held before calling this function!
6342  */
6343 char *
6344 ddi_deviname(dev_info_t *dip, char *name)
6345 {
6346 	char *addrname;
6347 	char none = '\0';
6348 
6349 	if (dip == ddi_root_node()) {
6350 		*name = '\0';
6351 		return (name);
6352 	}
6353 
6354 	if (i_ddi_node_state(dip) < DS_INITIALIZED) {
6355 		addrname = &none;
6356 	} else {
6357 		addrname = ddi_get_name_addr(dip);
6358 	}
6359 
6360 	if (*addrname == '\0') {
6361 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6362 	} else {
6363 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6364 	}
6365 
6366 	return (name);
6367 }
6368 
6369 /*
6370  * Spits out the name of device node, typically name@addr, for a given node,
6371  * using the driver name, not the nodename.
6372  *
6373  * Used by match_parent. Not to be used elsewhere.
6374  */
6375 char *
6376 i_ddi_parname(dev_info_t *dip, char *name)
6377 {
6378 	char *addrname;
6379 
6380 	if (dip == ddi_root_node()) {
6381 		*name = '\0';
6382 		return (name);
6383 	}
6384 
6385 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6386 
6387 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6388 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6389 	else
6390 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6391 	return (name);
6392 }
6393 
6394 static char *
6395 pathname_work(dev_info_t *dip, char *path)
6396 {
6397 	char *bp;
6398 
6399 	if (dip == ddi_root_node()) {
6400 		*path = '\0';
6401 		return (path);
6402 	}
6403 	(void) pathname_work(ddi_get_parent(dip), path);
6404 	bp = path + strlen(path);
6405 	(void) ddi_deviname(dip, bp);
6406 	return (path);
6407 }
6408 
/*
 * Build the full /devices path of 'dip' into the caller-supplied
 * buffer 'path' and return it (callers in this file size the buffer
 * at MAXPATHLEN).
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6414 
6415 /*
6416  * Given a dev_t, return the pathname of the corresponding device in the
6417  * buffer pointed at by "path."  The buffer is assumed to be large enough
6418  * to hold the pathname of the device (MAXPATHLEN).
6419  *
6420  * The pathname of a device is the pathname of the devinfo node to which
6421  * the device "belongs," concatenated with the character ':' and the name
6422  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6423  * just the pathname of the devinfo node is returned without driving attach
6424  * of that node.  For a non-zero spec_type, an attach is performed and a
6425  * search of the minor list occurs.
6426  *
6427  * It is possible that the path associated with the dev_t is not
6428  * currently available in the devinfo tree.  In order to have a
6429  * dev_t, a device must have been discovered before, which means
6430  * that the path is always in the instance tree.  The one exception
6431  * to this is if the dev_t is associated with a pseudo driver, in
6432  * which case the device must exist on the pseudo branch of the
6433  * devinfo tree as a result of parsing .conf files.
6434  */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		/* for clone devices the minor number encodes the real major */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		mutex_enter(&(DEVI(dip)->devi_lock));
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		mutex_exit(&(DEVI(dip)->devi_lock));
		/* release the hold taken by e_ddi_hold_devi_by_path() */
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}
6488 
6489 /*
6490  * Given a major number and an instance, return the path.
6491  * This interface does NOT drive attach.
6492  */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t	*dip;

	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			/* demoted while we built the path: keep searching */
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6545 
/* Integer property created by gld_register() for GLD v0 drivers. */
#define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6547 
6548 /*
6549  * Given the dip for a network interface return the ppa for that interface.
6550  *
6551  * In all cases except GLD v0 drivers, the ppa == instance.
6552  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6553  * So for these drivers when the attach routine calls gld_register(),
6554  * the GLD framework creates an integer property called "gld_driver_ppa"
6555  * that can be queried here.
6556  *
6557  * The only time this function is used is when a system is booting over nfs.
6558  * In this case the system has to resolve the pathname of the boot device
6559  * to it's ppa.
6560  */
6561 int
6562 i_ddi_devi_get_ppa(dev_info_t *dip)
6563 {
6564 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6565 			DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6566 			GLD_DRIVER_PPA, ddi_get_instance(dip)));
6567 }
6568 
6569 /*
6570  * i_ddi_devi_set_ppa() should only be called from gld_register()
6571  * and only for GLD v0 drivers
6572  */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* Record the GLD v0 ppa as an integer property on the node. */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6578 
6579 
6580 /*
6581  * Private DDI Console bell functions.
6582  */
void
ddi_ring_console_bell(clock_t duration)
{
	/* Ring the console bell only if a handler has been registered. */
	if (ddi_console_bell_func != NULL)
		(*ddi_console_bell_func)(duration);
}
6589 
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	/* Install (or clear, with NULL) the console bell handler. */
	ddi_console_bell_func = bellfunc;
}
6595 
6596 int
6597 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6598 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6599 {
6600 	int (*funcp)() = ddi_dma_allochdl;
6601 	ddi_dma_attr_t dma_attr;
6602 	struct bus_ops *bop;
6603 
6604 	if (attr == (ddi_dma_attr_t *)0)
6605 		return (DDI_DMA_BADATTR);
6606 
6607 	dma_attr = *attr;
6608 
6609 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6610 	if (bop && bop->bus_dma_allochdl)
6611 		funcp = bop->bus_dma_allochdl;
6612 
6613 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6614 }
6615 
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	ddi_dma_handle_t h = *handlep;
	/*
	 * NOTE(review): the HD macro appears to expand in terms of the
	 * local 'h' (it is only used in functions with an 'h' in scope),
	 * so the local name is load-bearing — confirm against the HD
	 * definition earlier in this file before renaming.
	 */
	(void) ddi_dma_freehdl(HD, HD, h);
}
6622 
/*
 * Callback list id used by ddi_dma_mem_alloc()/ddi_dma_mem_free():
 * failed allocations queue a callback here (ddi_set_callback) which is
 * retried when memory is freed (ddi_run_callback).
 */
static uintptr_t dma_mem_list_id = 0;
6624 
6625 
int
ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
	ddi_device_acc_attr_t *accattrp, uint_t xfermodes,
	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
	size_t *real_length, ddi_acc_handle_t *handlep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip = hp->dmai_rdip;
	ddi_acc_hdl_t *ap;
	ddi_dma_attr_t *attrp = &hp->dmai_attr;
	uint_t sleepflag;
	int (*fp)(caddr_t);
	int rval;

	/*
	 * Map the DDI wait policy onto a kmem flag; any other value is
	 * treated as a caller-supplied callback function.
	 */
	if (waitfp == DDI_DMA_SLEEP)
		fp = (int (*)())KM_SLEEP;
	else if (waitfp == DDI_DMA_DONTWAIT)
		fp = (int (*)())KM_NOSLEEP;
	else
		fp = waitfp;
	*handlep = impl_acc_hdl_alloc(fp, arg);
	if (*handlep == NULL)
		return (DDI_FAILURE);

	/*
	 * initialize the common elements of data access handle
	 */
	ap = impl_acc_hdl_get(*handlep);
	ap->ah_vers = VERS_ACCHDL;
	ap->ah_dip = dip;
	ap->ah_offset = 0;
	ap->ah_len = 0;
	ap->ah_xfermodes = xfermodes;
	ap->ah_acc = *accattrp;

	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
	if (xfermodes == DDI_DMA_CONSISTENT) {
		/* consistent: caller's length is used as-is */
		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 0,
			    accattrp, kaddrp, NULL, ap);
		*real_length = length;
	} else {
		/* streaming: i_ddi_mem_alloc() reports the real length */
		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 1,
			    accattrp, kaddrp, real_length, ap);
	}
	if (rval == DDI_SUCCESS) {
		ap->ah_len = (off_t)(*real_length);
		ap->ah_addr = *kaddrp;
	} else {
		/*
		 * On failure free the access handle; a caller-supplied
		 * callback is queued for retry when memory frees up.
		 */
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
		}
		rval = DDI_FAILURE;
	}
	return (rval);
}
6683 
6684 void
6685 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6686 {
6687 	ddi_acc_hdl_t *ap;
6688 
6689 	ap = impl_acc_hdl_get(*handlep);
6690 	ASSERT(ap);
6691 
6692 	if (ap->ah_xfermodes == DDI_DMA_CONSISTENT) {
6693 		i_ddi_mem_free((caddr_t)ap->ah_addr, 0);
6694 	} else {
6695 		i_ddi_mem_free((caddr_t)ap->ah_addr, 1);
6696 	}
6697 
6698 	/*
6699 	 * free the handle
6700 	 */
6701 	impl_acc_hdl_free(*handlep);
6702 	*handlep = (ddi_acc_handle_t)NULL;
6703 
6704 	if (dma_mem_list_id != 0) {
6705 		ddi_run_callback(&dma_mem_list_id);
6706 	}
6707 }
6708 
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* Build a DMA request describing the buf's memory object. */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if ((bp->b_flags & (B_PAGEIO|B_REMAPPED)) == B_PAGEIO) {
		/* Paged I/O, not remapped: describe as a page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		/* Otherwise describe as a virtual address range. */
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if ((bp->b_flags & (B_SHADOW|B_REMAPPED)) == B_SHADOW) {
			/* Shadow page list rides along as private data. */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
				(bp->b_flags & (B_PHYS | B_REMAPPED))?
				DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if (bp->b_proc == NULL || bp->b_proc->p_as == &kas ||
		    (bp->b_flags & B_REMAPPED) != 0) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* Dispatch the request to the bus nexus bind routine. */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
6763 
6764 int
6765 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
6766 	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
6767 	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6768 {
6769 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6770 	dev_info_t *hdip, *dip;
6771 	struct ddi_dma_req dmareq;
6772 	int (*funcp)();
6773 
6774 	if (len == (uint_t)0) {
6775 		return (DDI_DMA_NOMAPPING);
6776 	}
6777 	dmareq.dmar_flags = flags;
6778 	dmareq.dmar_fp = waitfp;
6779 	dmareq.dmar_arg = arg;
6780 	dmareq.dmar_object.dmao_size = len;
6781 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
6782 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
6783 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
6784 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
6785 
6786 	dip = hp->dmai_rdip;
6787 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
6788 	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
6789 	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
6790 }
6791 
6792 void
6793 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
6794 {
6795 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6796 	ddi_dma_cookie_t *cp;
6797 
6798 	cp = hp->dmai_cookie;
6799 	ASSERT(cp);
6800 
6801 	cookiep->dmac_notused = cp->dmac_notused;
6802 	cookiep->dmac_type = cp->dmac_type;
6803 	cookiep->dmac_address = cp->dmac_address;
6804 	cookiep->dmac_size = cp->dmac_size;
6805 	hp->dmai_cookie++;
6806 }
6807 
6808 int
6809 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
6810 {
6811 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6812 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
6813 		return (DDI_FAILURE);
6814 	} else {
6815 		*nwinp = hp->dmai_nwin;
6816 		return (DDI_SUCCESS);
6817 	}
6818 }
6819 
int
ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)() = ddi_dma_win;
	struct bus_ops *bop;

	/*
	 * Prefer the bus nexus window routine when one exists.
	 * NOTE(review): HD appears to expand in terms of the parameter
	 * 'h' — confirm against the HD definition earlier in this file.
	 */
	bop = DEVI(HD)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_win)
		funcp = bop->bus_dma_win;

	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
}
6833 
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	/* Request 64-bit SBus burst sizes via the DMA mctl interface. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
		&burstsizes, 0, 0));
}
6840 
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	/* Default fault check: report the handle's cached fault state. */
	return (hp->dmai_fault);
}
6846 
6847 int
6848 ddi_check_dma_handle(ddi_dma_handle_t handle)
6849 {
6850 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6851 	int (*check)(ddi_dma_impl_t *);
6852 
6853 	if ((check = hp->dmai_fault_check) == NULL)
6854 		check = i_ddi_dma_fault_check;
6855 
6856 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
6857 }
6858 
6859 void
6860 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
6861 {
6862 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6863 	void (*notify)(ddi_dma_impl_t *);
6864 
6865 	if (!hp->dmai_fault) {
6866 		hp->dmai_fault = 1;
6867 		if ((notify = hp->dmai_fault_notify) != NULL)
6868 			(*notify)(hp);
6869 	}
6870 }
6871 
6872 void
6873 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
6874 {
6875 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6876 	void (*notify)(ddi_dma_impl_t *);
6877 
6878 	if (hp->dmai_fault) {
6879 		hp->dmai_fault = 0;
6880 		if ((notify = hp->dmai_fault_notify) != NULL)
6881 			(*notify)(hp);
6882 	}
6883 }
6884 
6885 /*
6886  * register mapping routines.
6887  */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
	ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* mapping failed: tear down the handle we just allocated */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		/* remember the kernel address for ddi_regs_map_free() */
		hp->ah_addr = *addrp;
	}

	return (result);
}
6933 
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* Rebuild an unmap request mirroring the original map request. */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
		hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
6962 
6963 int
6964 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
6965 	ssize_t dev_advcnt, uint_t dev_datasz)
6966 {
6967 	uint8_t *b;
6968 	uint16_t *w;
6969 	uint32_t *l;
6970 	uint64_t *ll;
6971 
6972 	/* check for total byte count is multiple of data transfer size */
6973 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
6974 		return (DDI_FAILURE);
6975 
6976 	switch (dev_datasz) {
6977 	case DDI_DATA_SZ01_ACC:
6978 		for (b = (uint8_t *)dev_addr;
6979 			bytecount != 0; bytecount -= 1, b += dev_advcnt)
6980 			ddi_put8(handle, b, 0);
6981 		break;
6982 	case DDI_DATA_SZ02_ACC:
6983 		for (w = (uint16_t *)dev_addr;
6984 			bytecount != 0; bytecount -= 2, w += dev_advcnt)
6985 			ddi_put16(handle, w, 0);
6986 		break;
6987 	case DDI_DATA_SZ04_ACC:
6988 		for (l = (uint32_t *)dev_addr;
6989 			bytecount != 0; bytecount -= 4, l += dev_advcnt)
6990 			ddi_put32(handle, l, 0);
6991 		break;
6992 	case DDI_DATA_SZ08_ACC:
6993 		for (ll = (uint64_t *)dev_addr;
6994 			bytecount != 0; bytecount -= 8, ll += dev_advcnt)
6995 			ddi_put64(handle, ll, 0x0ll);
6996 		break;
6997 	default:
6998 		return (DDI_FAILURE);
6999 	}
7000 	return (DDI_SUCCESS);
7001 }
7002 
int
ddi_device_copy(
	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
	size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	/*
	 * Copy between two device regions through their access handles,
	 * one element at a time, advancing each side by its own
	 * *_advcnt element stride after every transfer.
	 */
	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
				ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
				ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
				ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
				ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		/* unsupported transfer size */
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7068 
/* Byte-swap a 16-bit value. */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

/* Byte-swap a 32-bit value, built from two swap16() halves. */
#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

/* Byte-swap a 64-bit value, built from two swap32() halves. */
#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7080 
uint16_t
ddi_swap16(uint16_t value)
{
	/* Reverse the two bytes of a 16-bit value. */
	return ((uint16_t)(((value & 0x00ff) << 8) |
	    ((value & 0xff00) >> 8)));
}
7086 
uint32_t
ddi_swap32(uint32_t value)
{
	/* Reverse the four bytes of a 32-bit value. */
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    ((value & 0xff000000U) >> 24));
}
7092 
uint64_t
ddi_swap64(uint64_t value)
{
	/* Reverse the eight bytes of a 64-bit value. */
	return (((value & 0x00000000000000ffULL) << 56) |
	    ((value & 0x000000000000ff00ULL) << 40) |
	    ((value & 0x0000000000ff0000ULL) << 24) |
	    ((value & 0x00000000ff000000ULL) << 8) |
	    ((value & 0x000000ff00000000ULL) >> 8) |
	    ((value & 0x0000ff0000000000ULL) >> 24) |
	    ((value & 0x00ff000000000000ULL) >> 40) |
	    ((value & 0xff00000000000000ULL) >> 56));
}
7098 
7099 /*
7100  * Convert a binding name to a driver name.
7101  * A binding name is the name used to determine the driver for a
7102  * device - it may be either an alias for the driver or the name
7103  * of the driver itself.
7104  */
7105 char *
7106 i_binding_to_drv_name(char *bname)
7107 {
7108 	major_t major_no;
7109 
7110 	ASSERT(bname != NULL);
7111 
7112 	if ((major_no = ddi_name_to_major(bname)) == -1)
7113 		return (NULL);
7114 	return (ddi_major_to_name(major_no));
7115 }
7116 
7117 /*
7118  * Search for minor name that has specified dev_t and spec_type.
7119  * If spec_type is zero then any dev_t match works.  Since we
7120  * are returning a pointer to the minor name string, we require the
7121  * caller to do the locking.
7122  */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data	*dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
		(strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
	/* caller must hold devi_lock - the returned name points into it */
	ASSERT(MUTEX_HELD(&(DEVI(dip)->devi_lock)));

	/*
	 * Scan the minor list for a node of an externally visible type
	 * whose dev_t matches; spec_type == 0 matches either spec type.
	 */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	return (NULL);
}
7151 
7152 /*
7153  * Find the devt and spectype of the specified minor_name.
7154  * Return DDI_FAILURE if minor_name not found. Since we are
7155  * returning everything via arguments we can do the locking.
7156  */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
	dev_t *devtp, int *spectypep)
{
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == (major_t)-1)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		if (STREAMSTAB(major) == NULL) {
			/* not a STREAMS driver - release the hold and fail */
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* this routine takes devi_lock itself - caller must not hold it */
	ASSERT(!MUTEX_HELD(&(DEVI(dip)->devi_lock)));
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* scan minor nodes of externally visible types for a name match */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_SUCCESS);
	}

	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_FAILURE);
}
7216 
extern char	hw_serial[];	/* hostid string, parsed in ddi_devid_init() */
static kmutex_t devid_gen_mutex;	/* protects devid_gen_number */
static short	devid_gen_number;	/* generation counter for DEVID_FAB ids */
7220 
#ifdef DEBUG

/* Tunables to deliberately corrupt registered devids for testing. */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* Set non-zero to trace devid-layer dev_t lookups via cmn_err. */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* Non-DEBUG build: devid tracing compiles away. */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7239 
7240 
7241 #ifdef	DEBUG
7242 
7243 static void
7244 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7245 {
7246 	int i;
7247 
7248 	cmn_err(CE_CONT, "%s:\n", msg);
7249 	for (i = 0; i < ndevs; i++) {
7250 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7251 	}
7252 }
7253 
7254 static void
7255 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7256 {
7257 	int i;
7258 
7259 	cmn_err(CE_CONT, "%s:\n", msg);
7260 	for (i = 0; i < npaths; i++) {
7261 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7262 	}
7263 }
7264 
7265 static void
7266 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7267 {
7268 	int i;
7269 
7270 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7271 	for (i = 0; i < ndevs; i++) {
7272 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7273 	}
7274 }
7275 
7276 #endif	/* DEBUG */
7277 
7278 /*
7279  * Register device id into DDI framework.
7280  * Must be called when device is attached.
7281  */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == (major_t)-1))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
			ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	ddi_devid_str_free(devid_str);

#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for devinfo snapshot compatibility.
	 * This code gets deleted when di_devid is gone from libdevid
	 */
	DEVI(dip)->devi_devid = DEVID_COMPATIBILITY;
#endif	/* DEVID_COMPATIBILITY */
	return (DDI_SUCCESS);
}
7348 
7349 int
7350 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7351 {
7352 	int rval;
7353 
7354 	rval = i_ddi_devid_register(dip, devid);
7355 	if (rval == DDI_SUCCESS) {
7356 		/*
7357 		 * Register devid in devid-to-path cache
7358 		 */
7359 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7360 			mutex_enter(&DEVI(dip)->devi_lock);
7361 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
7362 			mutex_exit(&DEVI(dip)->devi_lock);
7363 		} else {
7364 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7365 				ddi_driver_name(dip), ddi_get_instance(dip));
7366 		}
7367 	} else {
7368 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7369 			ddi_driver_name(dip), ddi_get_instance(dip));
7370 	}
7371 	return (rval);
7372 }
7373 
7374 /*
7375  * Remove (unregister) device id from DDI framework.
7376  * Must be called when device is detached.
7377  */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for micro release devinfo snapshot compatibility.
	 * This code gets deleted for the minor release.
	 */
	DEVI(dip)->devi_devid = NULL;		/* unset DEVID_PROP */
#endif	/* DEVID_COMPATIBILITY */

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}
7392 
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* Clear the registered flag before dropping cache and property. */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	/* remove from the devid-to-path cache, then drop the property */
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7402 
7403 /*
7404  * Allocate and initialize a device id.
7405  */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	/* validate the id arguments against the requested devid type */
	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* these types require caller-supplied id bytes */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids take no caller data ... */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* ... we build hostid + timestamp + generation ourselves */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	/* fill in the fixed header fields */
	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		int		hostid;
		char		*hostid_cp = &hw_serial[0];
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = stoi(&hostid_cp);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		/* caller-supplied id bytes are copied verbatim */
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7519 
7520 int
7521 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7522 {
7523 	char		*devidstr;
7524 
7525 	ASSERT(dev != DDI_DEV_T_NONE);
7526 
7527 	/* look up the property, devt specific first */
7528 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7529 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7530 		if ((dev == DDI_DEV_T_ANY) ||
7531 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7532 			DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7533 			DDI_PROP_SUCCESS)) {
7534 				return (DDI_FAILURE);
7535 		}
7536 	}
7537 
7538 	/* convert to binary form */
7539 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7540 		ddi_prop_free(devidstr);
7541 		return (DDI_FAILURE);
7542 	}
7543 	ddi_prop_free(devidstr);
7544 	return (DDI_SUCCESS);
7545 }
7546 
7547 /*
7548  * Return a copy of the device id for dev_t
7549  */
7550 int
7551 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7552 {
7553 	dev_info_t	*dip;
7554 	int		rval;
7555 
7556 	/* get the dip */
7557 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7558 		return (DDI_FAILURE);
7559 
7560 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7561 
7562 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7563 	return (rval);
7564 }
7565 
7566 /*
7567  * Return a copy of the minor name for dev_t and spec_type
7568  */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	dev_info_t	*dip;
	char		*nm;
	size_t		alloc_sz, sz;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	/* devi_lock must be held while the returned name is in use */
	mutex_enter(&(DEVI(dip)->devi_lock));

	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* make a copy */
	alloc_sz = strlen(nm) + 1;
retry:
	/* drop lock to allocate memory */
	mutex_exit(&(DEVI(dip)->devi_lock));
	*minor_name = kmem_alloc(alloc_sz, KM_SLEEP);
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* re-check things, since we dropped the lock */
	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		/* the minor node went away while the lock was dropped */
		mutex_exit(&(DEVI(dip)->devi_lock));
		kmem_free(*minor_name, alloc_sz);
		*minor_name = NULL;
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* verify size is the same */
	sz = strlen(nm) + 1;
	if (alloc_sz != sz) {
		/* name changed size while unlocked - resize and retry */
		kmem_free(*minor_name, alloc_sz);
		alloc_sz = sz;
		goto retry;
	}

	/* sz == alloc_sz - make a copy */
	(void) strcpy(*minor_name, nm);

	mutex_exit(&(DEVI(dip)->devi_lock));
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
	return (DDI_SUCCESS);
}
7621 
7622 int
7623 ddi_lyr_devid_to_devlist(
7624 	ddi_devid_t	devid,
7625 	char		*minor_name,
7626 	int		*retndevs,
7627 	dev_t		**retdevs)
7628 {
7629 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7630 
7631 	if (e_devid_cache_to_devt_list(devid, minor_name,
7632 	    retndevs, retdevs) == DDI_SUCCESS) {
7633 		ASSERT(*retndevs > 0);
7634 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7635 			*retndevs, *retdevs);
7636 		return (DDI_SUCCESS);
7637 	}
7638 
7639 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
7640 		return (DDI_FAILURE);
7641 	}
7642 
7643 	if (e_devid_cache_to_devt_list(devid, minor_name,
7644 	    retndevs, retdevs) == DDI_SUCCESS) {
7645 		ASSERT(*retndevs > 0);
7646 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7647 			*retndevs, *retdevs);
7648 		return (DDI_SUCCESS);
7649 	}
7650 
7651 	return (DDI_FAILURE);
7652 }
7653 
7654 void
7655 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
7656 {
7657 	kmem_free(devlist, sizeof (dev_t) * ndevs);
7658 }
7659 
7660 /*
7661  * Note: This will need to be fixed if we ever allow processes to
7662  * have more than one data model per exec.
7663  */
7664 model_t
7665 ddi_mmap_get_model(void)
7666 {
7667 	return (get_udatamodel());
7668 }
7669 
7670 model_t
7671 ddi_model_convert_from(model_t model)
7672 {
7673 	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
7674 }
7675 
7676 /*
7677  * ddi interfaces managing storage and retrieval of eventcookies.
7678  */
7679 
7680 /*
7681  * Invoke bus nexus driver's implementation of the
7682  * (*bus_remove_eventcall)() interface to remove a registered
7683  * callback handler for "event".
7684  */
7685 int
7686 ddi_remove_event_handler(ddi_callback_id_t id)
7687 {
7688 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
7689 	dev_info_t *ddip;
7690 
7691 	ASSERT(cb);
7692 	if (!cb) {
7693 		return (DDI_FAILURE);
7694 	}
7695 
7696 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
7697 	return (ndi_busop_remove_eventcall(ddip, id));
7698 }
7699 
7700 /*
7701  * Invoke bus nexus driver's implementation of the
7702  * (*bus_add_eventcall)() interface to register a callback handler
7703  * for "event".
7704  */
7705 int
7706 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
7707     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
7708     void *arg, ddi_callback_id_t *id)
7709 {
7710 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
7711 }
7712 
7713 
7714 /*
7715  * Return a handle for event "name" by calling up the device tree
7716  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
7717  * by a bus nexus or top of dev_info tree is reached.
7718  */
7719 int
7720 ddi_get_eventcookie(dev_info_t *dip, char *name,
7721     ddi_eventcookie_t *event_cookiep)
7722 {
7723 	return (ndi_busop_get_eventcookie(dip, dip,
7724 	    name, event_cookiep));
7725 }
7726 
7727 /*
7728  * single thread access to dev_info node and set state
7729  */
7730 void
7731 i_devi_enter(dev_info_t *dip, uint_t s_mask, uint_t w_mask, int has_lock)
7732 {
7733 	if (!has_lock)
7734 		mutex_enter(&(DEVI(dip)->devi_lock));
7735 
7736 	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));
7737 
7738 	/*
7739 	 * wait until state(s) have been changed
7740 	 */
7741 	while ((DEVI(dip)->devi_state & w_mask) != 0) {
7742 		cv_wait(&(DEVI(dip)->devi_cv), &(DEVI(dip)->devi_lock));
7743 	}
7744 	DEVI(dip)->devi_state |= s_mask;
7745 
7746 	if (!has_lock)
7747 		mutex_exit(&(DEVI(dip)->devi_lock));
7748 }
7749 
/*
 * Release the single-thread access taken by i_devi_enter(): clear the
 * states in c_mask and wake up any threads waiting in i_devi_enter().
 * If has_lock is nonzero the caller already holds devi_lock, and it
 * remains held on return.
 */
void
i_devi_exit(dev_info_t *dip, uint_t c_mask, int has_lock)
{
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * clear the state(s) and wakeup any threads waiting
	 * for state change
	 */
	DEVI(dip)->devi_state &= ~c_mask;
	cv_broadcast(&(DEVI(dip)->devi_cv));

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}
7768 
7769 /*
7770  * This procedure is provided as the general callback function when
7771  * umem_lockmemory calls as_add_callback for long term memory locking.
7772  * When as_unmap, as_setprot, or as_free encounter segments which have
7773  * locked memory, this callback will be invoked.
7774  */
7775 void
7776 umem_lock_undo(struct as *as, void *arg, uint_t event)
7777 {
7778 	_NOTE(ARGUNUSED(as, event))
7779 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
7780 
7781 	/*
7782 	 * Call the cleanup function.  Decrement the cookie reference
7783 	 * count, if it goes to zero, return the memory for the cookie.
7784 	 * The i_ddi_umem_unlock for this cookie may or may not have been
7785 	 * called already.  It is the responsibility of the caller of
7786 	 * umem_lockmemory to handle the case of the cleanup routine
7787 	 * being called after a ddi_umem_unlock for the cookie
7788 	 * was called.
7789 	 */
7790 
7791 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
7792 
7793 	/* remove the cookie if reference goes to zero */
7794 	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
7795 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
7796 	}
7797 }
7798 
7799 /*
7800  * The following two Consolidation Private routines provide generic
7801  * interfaces to increase/decrease the amount of device-locked memory.
7802  *
7803  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
7804  * must be called every time i_ddi_incr_locked_memory() is called.
7805  */
7806 int
7807 /* ARGSUSED */
7808 i_ddi_incr_locked_memory(proc_t *procp, task_t *taskp,
7809     kproject_t *projectp, zone_t *zonep, rctl_qty_t inc)
7810 {
7811 	kproject_t *projp;
7812 
7813 	ASSERT(procp);
7814 	ASSERT(mutex_owned(&procp->p_lock));
7815 
7816 	projp = procp->p_task->tk_proj;
7817 	mutex_enter(&umem_devlockmem_rctl_lock);
7818 	/*
7819 	 * Test if the requested memory can be locked without exceeding the
7820 	 * limits.
7821 	 */
7822 	if (rctl_test(rc_project_devlockmem, projp->kpj_rctls,
7823 	    procp, inc, RCA_SAFE) & RCT_DENY) {
7824 		mutex_exit(&umem_devlockmem_rctl_lock);
7825 		return (ENOMEM);
7826 	}
7827 	projp->kpj_data.kpd_devlockmem += inc;
7828 	mutex_exit(&umem_devlockmem_rctl_lock);
7829 	/*
7830 	 * Grab a hold on the project.
7831 	 */
7832 	(void) project_hold(projp);
7833 
7834 	return (0);
7835 }
7836 
7837 /*
7838  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
7839  * must be called every time i_ddi_decr_locked_memory() is called.
7840  */
7841 /* ARGSUSED */
7842 void
7843 i_ddi_decr_locked_memory(proc_t *procp, task_t *taskp,
7844     kproject_t *projectp, zone_t *zonep, rctl_qty_t dec)
7845 {
7846 	ASSERT(projectp);
7847 
7848 	mutex_enter(&umem_devlockmem_rctl_lock);
7849 	projectp->kpj_data.kpd_devlockmem -= dec;
7850 	mutex_exit(&umem_devlockmem_rctl_lock);
7851 
7852 	/*
7853 	 * Release the project pointer reference accquired in
7854 	 * i_ddi_incr_locked_memory().
7855 	 */
7856 	(void) project_rele(projectp);
7857 }
7858 
7859 /*
7860  * This routine checks if the max-device-locked-memory resource ctl is
7861  * exceeded, if not increments it, grabs a hold on the project.
7862  * Returns 0 if successful otherwise returns error code
7863  */
7864 static int
7865 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
7866 {
7867 	proc_t		*procp;
7868 	int		ret;
7869 
7870 	ASSERT(cookie);
7871 	procp = cookie->procp;
7872 	ASSERT(procp);
7873 
7874 	mutex_enter(&procp->p_lock);
7875 
7876 	if ((ret = i_ddi_incr_locked_memory(procp, NULL,
7877 		NULL, NULL, cookie->size)) != 0) {
7878 		mutex_exit(&procp->p_lock);
7879 		return (ret);
7880 	}
7881 
7882 	/*
7883 	 * save the project pointer in the
7884 	 * umem cookie, project pointer already
7885 	 * hold in i_ddi_incr_locked_memory
7886 	 */
7887 	cookie->lockmem_proj = (void *)procp->p_task->tk_proj;
7888 	mutex_exit(&procp->p_lock);
7889 
7890 	return (0);
7891 }
7892 
7893 /*
7894  * Decrements the max-device-locked-memory resource ctl and releases
7895  * the hold on the project that was acquired during umem_incr_devlockmem
7896  */
7897 static void
7898 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
7899 {
7900 	kproject_t	*projp;
7901 
7902 	if (!cookie->lockmem_proj)
7903 		return;
7904 
7905 	projp = (kproject_t *)cookie->lockmem_proj;
7906 	i_ddi_decr_locked_memory(NULL, NULL, projp, NULL, cookie->size);
7907 
7908 	cookie->lockmem_proj = NULL;
7909 }
7910 
7911 /*
7912  * A consolidation private function which is essentially equivalent to
7913  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
7914  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
7915  * the ops_vector is valid.
7916  *
7917  * Lock the virtual address range in the current process and create a
7918  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
7919  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
7920  * to user space.
7921  *
7922  * Note: The resource control accounting currently uses a full charge model
7923  * in other words attempts to lock the same/overlapping areas of memory
7924  * will deduct the full size of the buffer from the projects running
7925  * counter for the device locked memory.
7926  *
7927  * addr, size should be PAGESIZE aligned
7928  *
7929  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
7930  *	identifies whether the locked memory will be read or written or both
7931  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
7932  * be maintained for an indefinitely long period (essentially permanent),
7933  * rather than for what would be required for a typical I/O completion.
7934  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
7935  * if the memory pertains to a regular file which is mapped MAP_SHARED.
7936  * This is to prevent a deadlock if a file truncation is attempted after
7937  * after the locking is done.
7938  *
7939  * Returns 0 on success
7940  *	EINVAL - for invalid parameters
7941  *	EPERM, ENOMEM and other error codes returned by as_pagelock
7942  *	ENOMEM - is returned if the current request to lock memory exceeds
7943  *		project.max-device-locked-memory resource control value.
7944  *      EFAULT - memory pertains to a regular file mapped shared and
7945  *		and DDI_UMEMLOCK_LONGTERM flag is set
7946  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
7947  */
7948 int
7949 umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
7950 		struct umem_callback_ops *ops_vector,
7951 		proc_t *procp)
7952 {
7953 	int	error;
7954 	struct ddi_umem_cookie *p;
7955 	void	(*driver_callback)() = NULL;
7956 	struct as *as = procp->p_as;
7957 	struct seg		*seg;
7958 	vnode_t			*vp;
7959 
7960 	*cookie = NULL;		/* in case of any error return */
7961 
7962 	/* These are the only three valid flags */
7963 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
7964 	    DDI_UMEMLOCK_LONGTERM)) != 0)
7965 		return (EINVAL);
7966 
7967 	/* At least one (can be both) of the two access flags must be set */
7968 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
7969 		return (EINVAL);
7970 
7971 	/* addr and len must be page-aligned */
7972 	if (((uintptr_t)addr & PAGEOFFSET) != 0)
7973 		return (EINVAL);
7974 
7975 	if ((len & PAGEOFFSET) != 0)
7976 		return (EINVAL);
7977 
7978 	/*
7979 	 * For longterm locking a driver callback must be specified; if
7980 	 * not longterm then a callback is optional.
7981 	 */
7982 	if (ops_vector != NULL) {
7983 		if (ops_vector->cbo_umem_callback_version !=
7984 		    UMEM_CALLBACK_VERSION)
7985 			return (EINVAL);
7986 		else
7987 			driver_callback = ops_vector->cbo_umem_lock_cleanup;
7988 	}
7989 	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
7990 		return (EINVAL);
7991 
7992 	/*
7993 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
7994 	 * be called on first ddi_umem_lock or umem_lockmemory call.
7995 	 */
7996 	if (ddi_umem_unlock_thread == NULL)
7997 		i_ddi_umem_unlock_thread_start();
7998 
7999 	/* Allocate memory for the cookie */
8000 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8001 
8002 	/* Convert the flags to seg_rw type */
8003 	if (flags & DDI_UMEMLOCK_WRITE) {
8004 		p->s_flags = S_WRITE;
8005 	} else {
8006 		p->s_flags = S_READ;
8007 	}
8008 
8009 	/* Store procp in cookie for later iosetup/unlock */
8010 	p->procp = (void *)procp;
8011 
8012 	/*
8013 	 * Store the struct as pointer in cookie for later use by
8014 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8015 	 * is called after relvm is called.
8016 	 */
8017 	p->asp = as;
8018 
8019 	/*
8020 	 * The size field is needed for lockmem accounting.
8021 	 */
8022 	p->size = len;
8023 
8024 	if (umem_incr_devlockmem(p) != 0) {
8025 		/*
8026 		 * The requested memory cannot be locked
8027 		 */
8028 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8029 		*cookie = (ddi_umem_cookie_t)NULL;
8030 		return (ENOMEM);
8031 	}
8032 	/*
8033 	 * umem_incr_devlockmem stashes the project ptr into the
8034 	 * cookie. This is needed during unlock since that can
8035 	 * happen in a non-USER context
8036 	 */
8037 	ASSERT(p->lockmem_proj);
8038 
8039 	/* Lock the pages corresponding to addr, len in memory */
8040 	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
8041 	if (error != 0) {
8042 		umem_decr_devlockmem(p);
8043 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8044 		*cookie = (ddi_umem_cookie_t)NULL;
8045 		return (error);
8046 	}
8047 
8048 	/*
8049 	 * For longterm locking the addr must pertain to a seg_vn segment or
8050 	 * or a seg_spt segment.
8051 	 * If the segment pertains to a regular file, it cannot be
8052 	 * mapped MAP_SHARED.
8053 	 * This is to prevent a deadlock if a file truncation is attempted
8054 	 * after the locking is done.
8055 	 * Doing this after as_pagelock guarantees persistence of the as; if
8056 	 * an unacceptable segment is found, the cleanup includes calling
8057 	 * as_pageunlock before returning EFAULT.
8058 	 */
8059 	if (flags & DDI_UMEMLOCK_LONGTERM) {
8060 		extern  struct seg_ops segspt_shmops;
8061 		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
8062 		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
8063 			if (seg == NULL || seg->s_base > addr + len)
8064 				break;
8065 			if (((seg->s_ops != &segvn_ops) &&
8066 			    (seg->s_ops != &segspt_shmops)) ||
8067 			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
8068 			    vp != NULL && vp->v_type == VREG) &&
8069 			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
8070 				as_pageunlock(as, p->pparray,
8071 						addr, len, p->s_flags);
8072 				AS_LOCK_EXIT(as, &as->a_lock);
8073 				umem_decr_devlockmem(p);
8074 				kmem_free(p, sizeof (struct ddi_umem_cookie));
8075 				*cookie = (ddi_umem_cookie_t)NULL;
8076 				return (EFAULT);
8077 			}
8078 		}
8079 		AS_LOCK_EXIT(as, &as->a_lock);
8080 	}
8081 
8082 
8083 	/* Initialize the fields in the ddi_umem_cookie */
8084 	p->cvaddr = addr;
8085 	p->type = UMEM_LOCKED;
8086 	if (driver_callback != NULL) {
8087 		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
8088 		p->cook_refcnt = 2;
8089 		p->callbacks = *ops_vector;
8090 	} else {
8091 		/* only i_ddi_umme_unlock needs the cookie */
8092 		p->cook_refcnt = 1;
8093 	}
8094 
8095 	*cookie = (ddi_umem_cookie_t)p;
8096 
8097 	/*
8098 	 * If a driver callback was specified, add an entry to the
8099 	 * as struct callback list. The as_pagelock above guarantees
8100 	 * the persistence of as.
8101 	 */
8102 	if (driver_callback) {
8103 		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
8104 						addr, len, KM_SLEEP);
8105 		if (error != 0) {
8106 			as_pageunlock(as, p->pparray,
8107 					addr, len, p->s_flags);
8108 			umem_decr_devlockmem(p);
8109 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8110 			*cookie = (ddi_umem_cookie_t)NULL;
8111 		}
8112 	}
8113 	return (error);
8114 }
8115 
8116 /*
8117  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8118  * the cookie.  Called from i_ddi_umem_unlock_thread.
8119  */
8120 
8121 static void
8122 i_ddi_umem_unlock(struct ddi_umem_cookie *p)
8123 {
8124 	uint_t	rc;
8125 
8126 	/*
8127 	 * There is no way to determine whether a callback to
8128 	 * umem_lock_undo was registered via as_add_callback.
8129 	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8130 	 * a valid callback function structure.)  as_delete_callback
8131 	 * is called to delete a possible registered callback.  If the
8132 	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8133 	 * indicates that there was a callback registered, and that is was
8134 	 * successfully deleted.  Thus, the cookie reference count
8135 	 * will never be decremented by umem_lock_undo.  Just return the
8136 	 * memory for the cookie, since both users of the cookie are done.
8137 	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8138 	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
8139 	 * indicates that callback processing is taking place and, and
8140 	 * umem_lock_undo is, or will be, executing, and thus decrementing
8141 	 * the cookie reference count when it is complete.
8142 	 *
8143 	 * This needs to be done before as_pageunlock so that the
8144 	 * persistence of as is guaranteed because of the locked pages.
8145 	 *
8146 	 */
8147 	rc = as_delete_callback(p->asp, p);
8148 
8149 
8150 	/*
8151 	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8152 	 * after relvm is called so use p->asp.
8153 	 */
8154 	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);
8155 
8156 	/*
8157 	 * Now that we have unlocked the memory decrement the
8158 	 * max-device-locked-memory rctl
8159 	 */
8160 	umem_decr_devlockmem(p);
8161 
8162 	if (rc == AS_CALLBACK_DELETED) {
8163 		/* umem_lock_undo will not happen, return the cookie memory */
8164 		ASSERT(p->cook_refcnt == 2);
8165 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8166 	} else {
8167 		/*
8168 		 * umem_undo_lock may happen if as_delete_callback returned
8169 		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
8170 		 * reference count, atomically, and return the cookie
8171 		 * memory if the reference count goes to zero.  The only
8172 		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
8173 		 * case, just return the cookie memory.
8174 		 */
8175 		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8176 		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
8177 		    == 0)) {
8178 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8179 		}
8180 	}
8181 }
8182 
8183 /*
8184  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8185  *
8186  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8187  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8188  * via calls to ddi_umem_unlock.
8189  */
8190 
8191 static void
8192 i_ddi_umem_unlock_thread(void)
8193 {
8194 	struct ddi_umem_cookie	*ret_cookie;
8195 	callb_cpr_t	cprinfo;
8196 
8197 	/* process the ddi_umem_unlock list */
8198 	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8199 	    callb_generic_cpr, "unlock_thread");
8200 	for (;;) {
8201 		mutex_enter(&ddi_umem_unlock_mutex);
8202 		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
8203 			ret_cookie = ddi_umem_unlock_head;
8204 			/* take if off the list */
8205 			if ((ddi_umem_unlock_head =
8206 			    ddi_umem_unlock_head->unl_forw) == NULL) {
8207 				ddi_umem_unlock_tail = NULL;
8208 			}
8209 			mutex_exit(&ddi_umem_unlock_mutex);
8210 			/* unlock the pages in this cookie */
8211 			(void) i_ddi_umem_unlock(ret_cookie);
8212 		} else {   /* list is empty, wait for next ddi_umem_unlock */
8213 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
8214 			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8215 			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8216 			mutex_exit(&ddi_umem_unlock_mutex);
8217 		}
8218 	}
8219 	/* ddi_umem_unlock_thread does not exit */
8220 	/* NOTREACHED */
8221 }
8222 
8223 /*
8224  * Start the thread that will process the ddi_umem_unlock list if it is
8225  * not already started (i_ddi_umem_unlock_thread).
8226  */
8227 static void
8228 i_ddi_umem_unlock_thread_start(void)
8229 {
8230 	mutex_enter(&ddi_umem_unlock_mutex);
8231 	if (ddi_umem_unlock_thread == NULL) {
8232 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8233 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8234 		    TS_RUN, minclsyspri);
8235 	}
8236 	mutex_exit(&ddi_umem_unlock_mutex);
8237 }
8238 
8239 /*
8240  * Lock the virtual address range in the current process and create a
8241  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8242  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8243  * to user space.
8244  *
8245  * Note: The resource control accounting currently uses a full charge model
8246  * in other words attempts to lock the same/overlapping areas of memory
8247  * will deduct the full size of the buffer from the projects running
8248  * counter for the device locked memory. This applies to umem_lockmemory too.
8249  *
8250  * addr, size should be PAGESIZE aligned
8251  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8252  *	identifies whether the locked memory will be read or written or both
8253  *
8254  * Returns 0 on success
8255  *	EINVAL - for invalid parameters
8256  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8257  *	ENOMEM - is returned if the current request to lock memory exceeds
8258  *		project.max-device-locked-memory resource control value.
8259  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8260  */
8261 int
8262 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8263 {
8264 	int	error;
8265 	struct ddi_umem_cookie *p;
8266 
8267 	*cookie = NULL;		/* in case of any error return */
8268 
8269 	/* These are the only two valid flags */
8270 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8271 		return (EINVAL);
8272 	}
8273 
8274 	/* At least one of the two flags (or both) must be set */
8275 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8276 		return (EINVAL);
8277 	}
8278 
8279 	/* addr and len must be page-aligned */
8280 	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8281 		return (EINVAL);
8282 	}
8283 
8284 	if ((len & PAGEOFFSET) != 0) {
8285 		return (EINVAL);
8286 	}
8287 
8288 	/*
8289 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8290 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8291 	 */
8292 	if (ddi_umem_unlock_thread == NULL)
8293 		i_ddi_umem_unlock_thread_start();
8294 
8295 	/* Allocate memory for the cookie */
8296 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8297 
8298 	/* Convert the flags to seg_rw type */
8299 	if (flags & DDI_UMEMLOCK_WRITE) {
8300 		p->s_flags = S_WRITE;
8301 	} else {
8302 		p->s_flags = S_READ;
8303 	}
8304 
8305 	/* Store curproc in cookie for later iosetup/unlock */
8306 	p->procp = (void *)curproc;
8307 
8308 	/*
8309 	 * Store the struct as pointer in cookie for later use by
8310 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8311 	 * is called after relvm is called.
8312 	 */
8313 	p->asp = curproc->p_as;
8314 	/*
8315 	 * The size field is needed for lockmem accounting.
8316 	 */
8317 	p->size = len;
8318 
8319 	if (umem_incr_devlockmem(p) != 0) {
8320 		/*
8321 		 * The requested memory cannot be locked
8322 		 */
8323 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8324 		*cookie = (ddi_umem_cookie_t)NULL;
8325 		return (ENOMEM);
8326 	}
8327 	/*
8328 	 * umem_incr_devlockmem stashes the project ptr into the
8329 	 * cookie. This is needed during unlock since that can
8330 	 * happen in a non-USER context
8331 	 */
8332 	ASSERT(p->lockmem_proj);
8333 
8334 	/* Lock the pages corresponding to addr, len in memory */
8335 	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8336 	    addr, len, p->s_flags);
8337 	if (error != 0) {
8338 		umem_decr_devlockmem(p);
8339 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8340 		*cookie = (ddi_umem_cookie_t)NULL;
8341 		return (error);
8342 	}
8343 
8344 	/* Initialize the fields in the ddi_umem_cookie */
8345 	p->cvaddr = addr;
8346 	p->type = UMEM_LOCKED;
8347 	p->cook_refcnt = 1;
8348 
8349 	*cookie = (ddi_umem_cookie_t)p;
8350 	return (error);
8351 }
8352 
8353 /*
8354  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8355  * unlocked by i_ddi_umem_unlock_thread.
8356  */
8357 
8358 void
8359 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8360 {
8361 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8362 
8363 	ASSERT(p->type == UMEM_LOCKED);
8364 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8365 	ASSERT(ddi_umem_unlock_thread != NULL);
8366 
8367 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8368 	mutex_enter(&ddi_umem_unlock_mutex);
8369 	if (ddi_umem_unlock_head == NULL) {
8370 		ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8371 		cv_broadcast(&ddi_umem_unlock_cv);
8372 	} else {
8373 		ddi_umem_unlock_tail->unl_forw = p;
8374 		ddi_umem_unlock_tail = p;
8375 	}
8376 	mutex_exit(&ddi_umem_unlock_mutex);
8377 }
8378 
8379 /*
8380  * Create a buf structure from a ddi_umem_cookie
8381  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8382  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8383  * off, len - identifies the portion of the memory represented by the cookie
8384  *		that the buf points to.
8385  *	NOTE: off, len need to follow the alignment/size restrictions of the
8386  *		device (dev) that this buf will be passed to. Some devices
8387  *		will accept unrestricted alignment/size, whereas others (such as
8388  *		st) require some block-size alignment/size. It is the caller's
8389  *		responsibility to ensure that the alignment/size restrictions
8390  *		are met (we cannot assert as we do not know the restrictions)
8391  *
8392  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8393  *		the flags used in ddi_umem_lock
8394  *
8395  * The following three arguments are used to initialize fields in the
8396  * buf structure and are uninterpreted by this routine.
8397  *
8398  * dev
8399  * blkno
8400  * iodone
8401  *
8402  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8403  *
8404  * Returns a buf structure pointer on success (to be freed by freerbuf)
8405  *	NULL on any parameter error or memory alloc failure
8406  *
8407  */
8408 struct buf *
8409 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8410 	int direction, dev_t dev, daddr_t blkno,
8411 	int (*iodone)(struct buf *), int sleepflag)
8412 {
8413 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8414 	struct buf *bp;
8415 
8416 	/*
8417 	 * check for valid cookie offset, len
8418 	 */
8419 	if ((off + len) > p->size) {
8420 		return (NULL);
8421 	}
8422 
8423 	if (len > p->size) {
8424 		return (NULL);
8425 	}
8426 
8427 	/* direction has to be one of B_READ or B_WRITE */
8428 	if ((direction != B_READ) && (direction != B_WRITE)) {
8429 		return (NULL);
8430 	}
8431 
8432 	/* These are the only two valid sleepflags */
8433 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8434 		return (NULL);
8435 	}
8436 
8437 	/*
8438 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8439 	 */
8440 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8441 		return (NULL);
8442 	}
8443 
8444 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8445 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8446 		(p->procp == NULL) : (p->procp != NULL));
8447 
8448 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8449 	if (bp == NULL) {
8450 		return (NULL);
8451 	}
8452 	bioinit(bp);
8453 
8454 	bp->b_flags = B_BUSY | B_PHYS | direction;
8455 	bp->b_edev = dev;
8456 	bp->b_lblkno = blkno;
8457 	bp->b_iodone = iodone;
8458 	bp->b_bcount = len;
8459 	bp->b_proc = (proc_t *)p->procp;
8460 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8461 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8462 	if (p->pparray != NULL) {
8463 		bp->b_flags |= B_SHADOW;
8464 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8465 		bp->b_shadow = p->pparray + btop(off);
8466 	}
8467 	return (bp);
8468 }
8469 
8470 /*
8471  * Fault-handling and related routines
8472  */
8473 
8474 ddi_devstate_t
8475 ddi_get_devstate(dev_info_t *dip)
8476 {
8477 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8478 		return (DDI_DEVSTATE_OFFLINE);
8479 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8480 		return (DDI_DEVSTATE_DOWN);
8481 	else if (DEVI_IS_BUS_QUIESCED(dip))
8482 		return (DDI_DEVSTATE_QUIESCED);
8483 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8484 		return (DDI_DEVSTATE_DEGRADED);
8485 	else
8486 		return (DDI_DEVSTATE_UP);
8487 }
8488 
8489 void
8490 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8491 	ddi_fault_location_t location, const char *message)
8492 {
8493 	struct ddi_fault_event_data fd;
8494 	ddi_eventcookie_t ec;
8495 
8496 	/*
8497 	 * Assemble all the information into a fault-event-data structure
8498 	 */
8499 	fd.f_dip = dip;
8500 	fd.f_impact = impact;
8501 	fd.f_location = location;
8502 	fd.f_message = message;
8503 	fd.f_oldstate = ddi_get_devstate(dip);
8504 
8505 	/*
8506 	 * Get eventcookie from defining parent.
8507 	 */
8508 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8509 	    DDI_SUCCESS)
8510 		return;
8511 
8512 	(void) ndi_post_event(dip, dip, ec, &fd);
8513 }
8514 
/*
 * Return the device-class string of the dev_info node; NULL if no class
 * has been set (see i_ddi_set_devi_class()).
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8520 
8521 int
8522 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8523 {
8524 	struct dev_info *devi = DEVI(dip);
8525 
8526 	mutex_enter(&devi->devi_lock);
8527 
8528 	if (devi->devi_device_class)
8529 		kmem_free(devi->devi_device_class,
8530 		    strlen(devi->devi_device_class) + 1);
8531 
8532 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8533 	    != NULL) {
8534 		mutex_exit(&devi->devi_lock);
8535 		return (DDI_SUCCESS);
8536 	}
8537 
8538 	mutex_exit(&devi->devi_lock);
8539 
8540 	return (DDI_FAILURE);
8541 }
8542 
8543 
8544 /*
8545  * Task Queues DDI interfaces.
8546  */
8547 
8548 /* ARGSUSED */
8549 ddi_taskq_t *
8550 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8551     pri_t pri, uint_t cflags)
8552 {
8553 	char full_name[TASKQ_NAMELEN];
8554 	const char *tq_name;
8555 	int nodeid = 0;
8556 
8557 	if (dip == NULL)
8558 		tq_name = name;
8559 	else {
8560 		nodeid = ddi_get_instance(dip);
8561 
8562 		if (name == NULL)
8563 			name = "tq";
8564 
8565 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8566 		    ddi_driver_name(dip), name);
8567 
8568 		tq_name = full_name;
8569 	}
8570 
8571 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8572 		    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8573 		    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8574 }
8575 
/*
 * Destroy a task queue created by ddi_taskq_create().
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8581 
8582 int
8583 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8584     void *arg, uint_t dflags)
8585 {
8586 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8587 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8588 
8589 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8590 }
8591 
/*
 * Wait for all tasks currently on the task queue to complete.
 */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8597 
/*
 * Suspend execution of tasks on the task queue.
 */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8603 
/*
 * Return B_TRUE if the task queue is currently suspended.
 */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8609 
/*
 * Resume a task queue suspended by ddi_taskq_suspend().
 */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8615 
8616 int
8617 ddi_parse(
8618 	const char	*ifname,
8619 	char		*alnum,
8620 	uint_t		*nump)
8621 {
8622 	const char	*p;
8623 	int		l;
8624 	ulong_t		num;
8625 	boolean_t	nonum = B_TRUE;
8626 	char		c;
8627 
8628 	l = strlen(ifname);
8629 	for (p = ifname + l; p != ifname; l--) {
8630 		c = *--p;
8631 		if (!isdigit(c)) {
8632 			(void) strlcpy(alnum, ifname, l + 1);
8633 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
8634 				return (DDI_FAILURE);
8635 			break;
8636 		}
8637 		nonum = B_FALSE;
8638 	}
8639 	if (l == 0 || nonum)
8640 		return (DDI_FAILURE);
8641 
8642 	*nump = num;
8643 	return (DDI_SUCCESS);
8644 }
8645