xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 5f61829a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2022 Garrett D'Amore
25  * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
26  * Copyright 2023 MNX Cloud, Inc.
27  * Copyright 2023 Oxide Computer Company
28  */
29 
30 #include <sys/note.h>
31 #include <sys/types.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/buf.h>
35 #include <sys/uio.h>
36 #include <sys/cred.h>
37 #include <sys/poll.h>
38 #include <sys/mman.h>
39 #include <sys/kmem.h>
40 #include <sys/model.h>
41 #include <sys/file.h>
42 #include <sys/proc.h>
43 #include <sys/open.h>
44 #include <sys/user.h>
45 #include <sys/t_lock.h>
46 #include <sys/vm.h>
47 #include <sys/stat.h>
48 #include <vm/hat.h>
49 #include <vm/seg.h>
50 #include <vm/seg_vn.h>
51 #include <vm/seg_dev.h>
52 #include <vm/as.h>
53 #include <sys/cmn_err.h>
54 #include <sys/cpuvar.h>
55 #include <sys/debug.h>
56 #include <sys/autoconf.h>
57 #include <sys/sunddi.h>
58 #include <sys/esunddi.h>
59 #include <sys/sunndi.h>
60 #include <sys/kstat.h>
61 #include <sys/conf.h>
62 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
63 #include <sys/ndi_impldefs.h>	/* include prototypes */
64 #include <sys/ddi_periodic.h>
65 #include <sys/hwconf.h>
66 #include <sys/pathname.h>
67 #include <sys/modctl.h>
68 #include <sys/epm.h>
69 #include <sys/devctl.h>
70 #include <sys/callb.h>
71 #include <sys/cladm.h>
72 #include <sys/sysevent.h>
73 #include <sys/dacf_impl.h>
74 #include <sys/ddidevmap.h>
75 #include <sys/bootconf.h>
76 #include <sys/disp.h>
77 #include <sys/atomic.h>
78 #include <sys/promif.h>
79 #include <sys/instance.h>
80 #include <sys/sysevent/eventdefs.h>
81 #include <sys/task.h>
82 #include <sys/project.h>
83 #include <sys/taskq.h>
84 #include <sys/devpolicy.h>
85 #include <sys/ctype.h>
86 #include <net/if.h>
87 #include <sys/rctl.h>
88 #include <sys/zone.h>
89 #include <sys/clock_impl.h>
90 #include <sys/ddi.h>
91 #include <sys/modhash.h>
92 #include <sys/sunldi_impl.h>
93 #include <sys/fs/dv_node.h>
94 #include <sys/fs/snode.h>
95 
96 extern	pri_t	minclsyspri;
97 
98 extern	rctl_hndl_t rc_project_locked_mem;
99 extern	rctl_hndl_t rc_zone_locked_mem;
100 
101 #ifdef DEBUG
102 static int sunddi_debug = 0;
103 #endif /* DEBUG */
104 
105 /* ddi_umem_unlock miscellaneous */
106 
107 static	void	i_ddi_umem_unlock_thread_start(void);
108 
109 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
110 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
111 static	kthread_t	*ddi_umem_unlock_thread;
112 /*
113  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
114  */
115 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
116 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
117 
118 /*
119  * DDI(Sun) Function and flag definitions:
120  */
121 
122 #if defined(__x86)
123 /*
124  * Used to indicate which entries were chosen from a range.
125  */
126 char	*chosen_reg = "chosen-reg";
127 #endif
128 
129 /*
130  * Function used to ring system console bell
131  */
132 void (*ddi_console_bell_func)(clock_t duration);
133 
134 /*
135  * Creating register mappings and handling interrupts:
136  */
137 
138 /*
139  * Generic ddi_map: Call parent to fulfill request...
140  */
141 
142 int
ddi_map(dev_info_t * dp,ddi_map_req_t * mp,off_t offset,off_t len,caddr_t * addrp)143 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
144     off_t len, caddr_t *addrp)
145 {
146 	dev_info_t *pdip;
147 
148 	ASSERT(dp);
149 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
150 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
151 	    dp, mp, offset, len, addrp));
152 }
153 
154 /*
155  * ddi_apply_range: (Called by nexi only.)
156  * Apply ranges in parent node dp, to child regspec rp...
157  */
158 
159 int
ddi_apply_range(dev_info_t * dp,dev_info_t * rdip,struct regspec * rp)160 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
161 {
162 	return (i_ddi_apply_range(dp, rdip, rp));
163 }
164 
165 int
ddi_map_regs(dev_info_t * dip,uint_t rnumber,caddr_t * kaddrp,off_t offset,off_t len)166 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
167     off_t len)
168 {
169 	ddi_map_req_t mr;
170 #if defined(__x86)
171 	struct {
172 		int	bus;
173 		int	addr;
174 		int	size;
175 	} reg, *reglist;
176 	uint_t	length;
177 	int	rc;
178 
179 	/*
180 	 * get the 'registers' or the 'reg' property.
181 	 * We look up the reg property as an array of
182 	 * int's.
183 	 */
184 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
185 	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
186 	if (rc != DDI_PROP_SUCCESS)
187 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
188 		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
189 	if (rc == DDI_PROP_SUCCESS) {
190 		/*
191 		 * point to the required entry.
192 		 */
193 		reg = reglist[rnumber];
194 		reg.addr += offset;
195 		if (len != 0)
196 			reg.size = len;
197 		/*
198 		 * make a new property containing ONLY the required tuple.
199 		 */
200 		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
201 		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
202 		    != DDI_PROP_SUCCESS) {
203 			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
204 			    "property", DEVI(dip)->devi_name,
205 			    DEVI(dip)->devi_instance, chosen_reg);
206 		}
207 		/*
208 		 * free the memory allocated by
209 		 * ddi_prop_lookup_int_array ().
210 		 */
211 		ddi_prop_free((void *)reglist);
212 	}
213 #endif
214 	mr.map_op = DDI_MO_MAP_LOCKED;
215 	mr.map_type = DDI_MT_RNUMBER;
216 	mr.map_obj.rnumber = rnumber;
217 	mr.map_prot = PROT_READ | PROT_WRITE;
218 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
219 	mr.map_handlep = NULL;
220 	mr.map_vers = DDI_MAP_VERSION;
221 
222 	/*
223 	 * Call my parent to map in my regs.
224 	 */
225 
226 	return (ddi_map(dip, &mr, offset, len, kaddrp));
227 }
228 
229 void
ddi_unmap_regs(dev_info_t * dip,uint_t rnumber,caddr_t * kaddrp,off_t offset,off_t len)230 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
231     off_t len)
232 {
233 	ddi_map_req_t mr;
234 
235 	mr.map_op = DDI_MO_UNMAP;
236 	mr.map_type = DDI_MT_RNUMBER;
237 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
238 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
239 	mr.map_obj.rnumber = rnumber;
240 	mr.map_handlep = NULL;
241 	mr.map_vers = DDI_MAP_VERSION;
242 
243 	/*
244 	 * Call my parent to unmap my regs.
245 	 */
246 
247 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
248 	*kaddrp = (caddr_t)0;
249 #if defined(__x86)
250 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
251 #endif
252 }
253 
254 int
ddi_bus_map(dev_info_t * dip,dev_info_t * rdip,ddi_map_req_t * mp,off_t offset,off_t len,caddr_t * vaddrp)255 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
256     off_t offset, off_t len, caddr_t *vaddrp)
257 {
258 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
259 }
260 
261 /*
262  * nullbusmap:	The/DDI default bus_map entry point for nexi
263  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
264  *		with no HAT/MMU layer to be programmed at this level.
265  *
266  *		If the call is to map by rnumber, return an error,
267  *		otherwise pass anything else up the tree to my parent.
268  */
269 int
nullbusmap(dev_info_t * dip,dev_info_t * rdip,ddi_map_req_t * mp,off_t offset,off_t len,caddr_t * vaddrp)270 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
271     off_t offset, off_t len, caddr_t *vaddrp)
272 {
273 	_NOTE(ARGUNUSED(rdip))
274 	if (mp->map_type == DDI_MT_RNUMBER)
275 		return (DDI_ME_UNSUPPORTED);
276 
277 	return (ddi_map(dip, mp, offset, len, vaddrp));
278 }
279 
280 /*
281  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
282  *			   Only for use by nexi using the reg/range paradigm.
283  */
284 struct regspec *
ddi_rnumber_to_regspec(dev_info_t * dip,int rnumber)285 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
286 {
287 	return (i_ddi_rnumber_to_regspec(dip, rnumber));
288 }
289 
290 
291 /*
292  * Note that we allow the dip to be nil because we may be called
293  * prior even to the instantiation of the devinfo tree itself - all
294  * regular leaf and nexus drivers should always use a non-nil dip!
295  *
296  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
297  * simply get a synchronous fault as soon as we touch a missing address.
298  *
299  * Poke is rather more carefully handled because we might poke to a write
300  * buffer, "succeed", then only find some time later that we got an
301  * asynchronous fault that indicated that the address we were writing to
302  * was not really backed by hardware.
303  */
304 
305 static int
i_ddi_peekpoke(dev_info_t * devi,ddi_ctl_enum_t cmd,size_t size,void * addr,void * value_p)306 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
307     void *addr, void *value_p)
308 {
309 	union {
310 		uint64_t	u64;
311 		uint32_t	u32;
312 		uint16_t	u16;
313 		uint8_t		u8;
314 	} peekpoke_value;
315 
316 	peekpoke_ctlops_t peekpoke_args;
317 	uint64_t dummy_result;
318 	int rval;
319 
320 	/* Note: size is assumed to be correct;  it is not checked. */
321 	peekpoke_args.size = size;
322 	peekpoke_args.dev_addr = (uintptr_t)addr;
323 	peekpoke_args.handle = NULL;
324 	peekpoke_args.repcount = 1;
325 	peekpoke_args.flags = 0;
326 
327 	if (cmd == DDI_CTLOPS_POKE) {
328 		switch (size) {
329 		case sizeof (uint8_t):
330 			peekpoke_value.u8 = *(uint8_t *)value_p;
331 			break;
332 		case sizeof (uint16_t):
333 			peekpoke_value.u16 = *(uint16_t *)value_p;
334 			break;
335 		case sizeof (uint32_t):
336 			peekpoke_value.u32 = *(uint32_t *)value_p;
337 			break;
338 		case sizeof (uint64_t):
339 			peekpoke_value.u64 = *(uint64_t *)value_p;
340 			break;
341 		}
342 	}
343 
344 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
345 
346 	if (devi != NULL)
347 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
348 		    &dummy_result);
349 	else
350 		rval = peekpoke_mem(cmd, &peekpoke_args);
351 
352 	/*
353 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
354 	 */
355 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
356 		switch (size) {
357 		case sizeof (uint8_t):
358 			*(uint8_t *)value_p = peekpoke_value.u8;
359 			break;
360 		case sizeof (uint16_t):
361 			*(uint16_t *)value_p = peekpoke_value.u16;
362 			break;
363 		case sizeof (uint32_t):
364 			*(uint32_t *)value_p = peekpoke_value.u32;
365 			break;
366 		case sizeof (uint64_t):
367 			*(uint64_t *)value_p = peekpoke_value.u64;
368 			break;
369 		}
370 	}
371 
372 	return (rval);
373 }
374 
375 /*
376  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
377  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
378  */
379 int
ddi_peek(dev_info_t * devi,size_t size,void * addr,void * value_p)380 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
381 {
382 	switch (size) {
383 	case sizeof (uint8_t):
384 	case sizeof (uint16_t):
385 	case sizeof (uint32_t):
386 	case sizeof (uint64_t):
387 		break;
388 	default:
389 		return (DDI_FAILURE);
390 	}
391 
392 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
393 }
394 
395 int
ddi_poke(dev_info_t * devi,size_t size,void * addr,void * value_p)396 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
397 {
398 	switch (size) {
399 	case sizeof (uint8_t):
400 	case sizeof (uint16_t):
401 	case sizeof (uint32_t):
402 	case sizeof (uint64_t):
403 		break;
404 	default:
405 		return (DDI_FAILURE);
406 	}
407 
408 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
409 }
410 
411 int
ddi_peek8(dev_info_t * dip,int8_t * addr,int8_t * val_p)412 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
413 {
414 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
415 	    val_p));
416 }
417 
418 int
ddi_peek16(dev_info_t * dip,int16_t * addr,int16_t * val_p)419 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
420 {
421 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
422 	    val_p));
423 }
424 
425 int
ddi_peek32(dev_info_t * dip,int32_t * addr,int32_t * val_p)426 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
427 {
428 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
429 	    val_p));
430 }
431 
432 int
ddi_peek64(dev_info_t * dip,int64_t * addr,int64_t * val_p)433 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
434 {
435 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
436 	    val_p));
437 }
438 
439 int
ddi_poke8(dev_info_t * dip,int8_t * addr,int8_t val)440 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
441 {
442 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
443 }
444 
445 int
ddi_poke16(dev_info_t * dip,int16_t * addr,int16_t val)446 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
447 {
448 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
449 }
450 
451 int
ddi_poke32(dev_info_t * dip,int32_t * addr,int32_t val)452 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
453 {
454 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
455 }
456 
457 int
ddi_poke64(dev_info_t * dip,int64_t * addr,int64_t val)458 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
459 {
460 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
461 }
462 
463 /*
464  * ddi_peekpokeio() is used primarily by the mem drivers for moving
465  * data to and from uio structures via peek and poke.  Note that we
466  * use "internal" routines ddi_peek and ddi_poke to make this go
467  * slightly faster, avoiding the call overhead ..
468  */
469 int
ddi_peekpokeio(dev_info_t * devi,struct uio * uio,enum uio_rw rw,caddr_t addr,size_t len,uint_t xfersize)470 ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
471     caddr_t addr, size_t len, uint_t xfersize)
472 {
473 	int64_t	ibuffer;
474 	int8_t w8;
475 	size_t sz;
476 	int o;
477 
478 	if (xfersize > sizeof (long))
479 		xfersize = sizeof (long);
480 
481 	while (len != 0) {
482 		if ((len | (uintptr_t)addr) & 1) {
483 			sz = sizeof (int8_t);
484 			if (rw == UIO_WRITE) {
485 				if ((o = uwritec(uio)) == -1)
486 					return (DDI_FAILURE);
487 				if (ddi_poke8(devi, (int8_t *)addr,
488 				    (int8_t)o) != DDI_SUCCESS)
489 					return (DDI_FAILURE);
490 			} else {
491 				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
492 				    (int8_t *)addr, &w8) != DDI_SUCCESS)
493 					return (DDI_FAILURE);
494 				if (ureadc(w8, uio))
495 					return (DDI_FAILURE);
496 			}
497 		} else {
498 			switch (xfersize) {
499 			case sizeof (int64_t):
500 				if (((len | (uintptr_t)addr) &
501 				    (sizeof (int64_t) - 1)) == 0) {
502 					sz = xfersize;
503 					break;
504 				}
505 				/*FALLTHROUGH*/
506 			case sizeof (int32_t):
507 				if (((len | (uintptr_t)addr) &
508 				    (sizeof (int32_t) - 1)) == 0) {
509 					sz = xfersize;
510 					break;
511 				}
512 				/*FALLTHROUGH*/
513 			default:
514 				/*
515 				 * This still assumes that we might have an
516 				 * I/O bus out there that permits 16-bit
517 				 * transfers (and that it would be upset by
518 				 * 32-bit transfers from such locations).
519 				 */
520 				sz = sizeof (int16_t);
521 				break;
522 			}
523 
524 			if (rw == UIO_READ) {
525 				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
526 				    addr, &ibuffer) != DDI_SUCCESS)
527 					return (DDI_FAILURE);
528 			}
529 
530 			if (uiomove(&ibuffer, sz, rw, uio))
531 				return (DDI_FAILURE);
532 
533 			if (rw == UIO_WRITE) {
534 				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
535 				    addr, &ibuffer) != DDI_SUCCESS)
536 					return (DDI_FAILURE);
537 			}
538 		}
539 		addr += sz;
540 		len -= sz;
541 	}
542 	return (DDI_SUCCESS);
543 }
544 
545 /*
546  * These routines are used by drivers that do layered ioctls
547  * On sparc, they're implemented in assembler to avoid spilling
548  * register windows in the common (copyin) case ..
549  */
550 #if !defined(__sparc)
551 int
ddi_copyin(const void * buf,void * kernbuf,size_t size,int flags)552 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
553 {
554 	if (flags & FKIOCTL)
555 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
556 	return (copyin(buf, kernbuf, size));
557 }
558 
559 int
ddi_copyout(const void * buf,void * kernbuf,size_t size,int flags)560 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
561 {
562 	if (flags & FKIOCTL)
563 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
564 	return (copyout(buf, kernbuf, size));
565 }
566 #endif	/* !__sparc */
567 
568 /*
569  * Conversions in nexus pagesize units.  We don't duplicate the
570  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
571  * routines anyway.
572  */
573 unsigned long
ddi_btop(dev_info_t * dip,unsigned long bytes)574 ddi_btop(dev_info_t *dip, unsigned long bytes)
575 {
576 	unsigned long pages;
577 
578 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
579 	return (pages);
580 }
581 
582 unsigned long
ddi_btopr(dev_info_t * dip,unsigned long bytes)583 ddi_btopr(dev_info_t *dip, unsigned long bytes)
584 {
585 	unsigned long pages;
586 
587 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
588 	return (pages);
589 }
590 
591 unsigned long
ddi_ptob(dev_info_t * dip,unsigned long pages)592 ddi_ptob(dev_info_t *dip, unsigned long pages)
593 {
594 	unsigned long bytes;
595 
596 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
597 	return (bytes);
598 }
599 
600 unsigned int
ddi_enter_critical(void)601 ddi_enter_critical(void)
602 {
603 	return ((uint_t)spl7());
604 }
605 
606 void
ddi_exit_critical(unsigned int spl)607 ddi_exit_critical(unsigned int spl)
608 {
609 	splx((int)spl);
610 }
611 
612 /*
613  * Nexus ctlops punter
614  */
615 
616 #if !defined(__sparc)
617 /*
618  * Request bus_ctl parent to handle a bus_ctl request
619  *
620  * (The sparc version is in sparc_ddi.s)
621  */
622 int
ddi_ctlops(dev_info_t * d,dev_info_t * r,ddi_ctl_enum_t op,void * a,void * v)623 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
624 {
625 	int (*fp)();
626 
627 	if (!d || !r)
628 		return (DDI_FAILURE);
629 
630 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
631 		return (DDI_FAILURE);
632 
633 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
634 	return ((*fp)(d, r, op, a, v));
635 }
636 
637 #endif
638 
639 /*
640  * DMA/DVMA setup
641  */
642 
643 #if !defined(__sparc)
644 /*
645  * Request bus_dma_ctl parent to fiddle with a dma request.
646  *
647  * (The sparc version is in sparc_subr.s)
648  */
649 int
ddi_dma_mctl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,enum ddi_dma_ctlops request,off_t * offp,size_t * lenp,caddr_t * objp,uint_t flags)650 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
651     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
652     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
653 {
654 	int (*fp)();
655 
656 	if (dip != ddi_root_node())
657 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
658 	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
659 	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
660 }
661 #endif
662 
663 /*
664  * For all DMA control functions, call the DMA control
665  * routine and return status.
666  *
667  * Just plain assume that the parent is to be called.
668  * If a nexus driver or a thread outside the framework
669  * of a nexus driver or a leaf driver calls these functions,
670  * it is up to them to deal with the fact that the parent's
671  * bus_dma_ctl function will be the first one called.
672  */
673 
674 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
675 
676 /*
677  * This routine is left in place to satisfy link dependencies
678  * for any 3rd party nexus drivers that rely on it.  It is never
679  * called, though.
680  */
681 /*ARGSUSED*/
682 int
ddi_dma_map(dev_info_t * dip,dev_info_t * rdip,struct ddi_dma_req * dmareqp,ddi_dma_handle_t * handlep)683 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
684     struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
685 {
686 	return (DDI_FAILURE);
687 }
688 
689 #if !defined(__sparc)
690 
691 /*
692  * The SPARC versions of these routines are done in assembler to
693  * save register windows, so they're in sparc_subr.s.
694  */
695 
696 int
ddi_dma_allochdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_attr_t * attr,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)697 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
698     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
699 {
700 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
701 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
702 
703 	if (dip != ddi_root_node())
704 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
705 
706 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
707 	return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
708 }
709 
710 int
ddi_dma_freehdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handlep)711 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
712 {
713 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
714 
715 	if (dip != ddi_root_node())
716 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
717 
718 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
719 	return ((*funcp)(dip, rdip, handlep));
720 }
721 
722 int
ddi_dma_bindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,struct ddi_dma_req * dmareq,ddi_dma_cookie_t * cp,uint_t * ccountp)723 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
724     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
725     ddi_dma_cookie_t *cp, uint_t *ccountp)
726 {
727 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
728 	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
729 
730 	if (dip != ddi_root_node())
731 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
732 
733 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
734 	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
735 }
736 
737 int
ddi_dma_unbindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)738 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
739     ddi_dma_handle_t handle)
740 {
741 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
742 
743 	if (dip != ddi_root_node())
744 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
745 
746 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
747 	return ((*funcp)(dip, rdip, handle));
748 }
749 
750 
751 int
ddi_dma_flush(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,off_t off,size_t len,uint_t cache_flags)752 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
753     ddi_dma_handle_t handle, off_t off, size_t len,
754     uint_t cache_flags)
755 {
756 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
757 	    off_t, size_t, uint_t);
758 
759 	if (dip != ddi_root_node())
760 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
761 
762 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
763 	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
764 }
765 
766 int
ddi_dma_win(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,uint_t win,off_t * offp,size_t * lenp,ddi_dma_cookie_t * cookiep,uint_t * ccountp)767 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
768     ddi_dma_handle_t handle, uint_t win, off_t *offp,
769     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
770 {
771 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
772 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
773 
774 	if (dip != ddi_root_node())
775 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
776 
777 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
778 	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
779 	    cookiep, ccountp));
780 }
781 
782 int
ddi_dma_sync(ddi_dma_handle_t h,off_t o,size_t l,uint_t whom)783 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
784 {
785 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
786 	dev_info_t *dip, *rdip;
787 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
788 	    size_t, uint_t);
789 
790 	/*
791 	 * the DMA nexus driver will set DMP_NOSYNC if the
792 	 * platform does not require any sync operation. For
793 	 * example if the memory is uncached or consistent
794 	 * and without any I/O write buffers involved.
795 	 */
796 	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
797 		return (DDI_SUCCESS);
798 
799 	dip = rdip = hp->dmai_rdip;
800 	if (dip != ddi_root_node())
801 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
802 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
803 	return ((*funcp)(dip, rdip, h, o, l, whom));
804 }
805 
806 int
ddi_dma_unbind_handle(ddi_dma_handle_t h)807 ddi_dma_unbind_handle(ddi_dma_handle_t h)
808 {
809 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
810 	dev_info_t *dip, *rdip;
811 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
812 
813 	dip = rdip = hp->dmai_rdip;
814 	if (dip != ddi_root_node())
815 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
816 	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
817 	return ((*funcp)(dip, rdip, h));
818 }
819 
820 #endif	/* !__sparc */
821 
822 /*
823  * DMA burst sizes, and transfer minimums
824  */
825 
826 int
ddi_dma_burstsizes(ddi_dma_handle_t handle)827 ddi_dma_burstsizes(ddi_dma_handle_t handle)
828 {
829 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
830 
831 	if (!dimp)
832 		return (0);
833 	else
834 		return (dimp->dmai_burstsizes);
835 }
836 
837 /*
838  * Given two DMA attribute structures, apply the attributes
839  * of one to the other, following the rules of attributes
840  * and the wishes of the caller.
841  *
842  * The rules of DMA attribute structures are that you cannot
843  * make things *less* restrictive as you apply one set
844  * of attributes to another.
845  *
846  */
847 void
ddi_dma_attr_merge(ddi_dma_attr_t * attr,ddi_dma_attr_t * mod)848 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
849 {
850 	attr->dma_attr_addr_lo =
851 	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
852 	attr->dma_attr_addr_hi =
853 	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
854 	attr->dma_attr_count_max =
855 	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
856 	attr->dma_attr_align =
857 	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
858 	attr->dma_attr_burstsizes =
859 	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
860 	attr->dma_attr_minxfer =
861 	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
862 	attr->dma_attr_maxxfer =
863 	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
864 	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
865 	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
866 	    (uint_t)mod->dma_attr_sgllen);
867 	attr->dma_attr_granular =
868 	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
869 }
870 
871 /*
872  * mmap/segmap interface:
873  */
874 
875 /*
876  * ddi_segmap:		setup the default segment driver. Calls the drivers
877  *			XXmmap routine to validate the range to be mapped.
878  *			Return ENXIO of the range is not valid.  Create
879  *			a seg_dev segment that contains all of the
880  *			necessary information and will reference the
881  *			default segment driver routines. It returns zero
882  *			on success or non-zero on failure.
883  */
884 int
ddi_segmap(dev_t dev,off_t offset,struct as * asp,caddr_t * addrp,off_t len,uint_t prot,uint_t maxprot,uint_t flags,cred_t * credp)885 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
886     uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
887 {
888 	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
889 	    off_t, uint_t, uint_t, uint_t, struct cred *);
890 
891 	return (spec_segmap(dev, offset, asp, addrp, len,
892 	    prot, maxprot, flags, credp));
893 }
894 
895 /*
896  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
897  *			drivers. Allows each successive parent to resolve
898  *			address translations and add its mappings to the
899  *			mapping list supplied in the page structure. It
900  *			returns zero on success	or non-zero on failure.
901  */
902 
903 int
ddi_map_fault(dev_info_t * dip,struct hat * hat,struct seg * seg,caddr_t addr,struct devpage * dp,pfn_t pfn,uint_t prot,uint_t lock)904 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
905     caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
906 {
907 	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
908 }
909 
910 /*
911  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
912  *	Invokes platform specific DDI to determine whether attributes specified
913  *	in attr(9s) are	valid for the region of memory that will be made
914  *	available for direct access to user process via the mmap(2) system call.
915  */
916 int
ddi_device_mapping_check(dev_t dev,ddi_device_acc_attr_t * accattrp,uint_t rnumber,uint_t * hat_flags)917 ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
918     uint_t rnumber, uint_t *hat_flags)
919 {
920 	ddi_acc_handle_t handle;
921 	ddi_map_req_t mr;
922 	ddi_acc_hdl_t *hp;
923 	int result;
924 	dev_info_t *dip;
925 
926 	/*
927 	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
928 	 * release it immediately since it should already be held by
929 	 * a devfs vnode.
930 	 */
931 	if ((dip =
932 	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
933 		return (-1);
934 	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */
935 
936 	/*
937 	 * Allocate and initialize the common elements of data
938 	 * access handle.
939 	 */
940 	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
941 	if (handle == NULL)
942 		return (-1);
943 
944 	hp = impl_acc_hdl_get(handle);
945 	hp->ah_vers = VERS_ACCHDL;
946 	hp->ah_dip = dip;
947 	hp->ah_rnumber = rnumber;
948 	hp->ah_offset = 0;
949 	hp->ah_len = 0;
950 	hp->ah_acc = *accattrp;
951 
952 	/*
953 	 * Set up the mapping request and call to parent.
954 	 */
955 	mr.map_op = DDI_MO_MAP_HANDLE;
956 	mr.map_type = DDI_MT_RNUMBER;
957 	mr.map_obj.rnumber = rnumber;
958 	mr.map_prot = PROT_READ | PROT_WRITE;
959 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
960 	mr.map_handlep = hp;
961 	mr.map_vers = DDI_MAP_VERSION;
962 	result = ddi_map(dip, &mr, 0, 0, NULL);
963 
964 	/*
965 	 * Region must be mappable, pick up flags from the framework.
966 	 */
967 	*hat_flags = hp->ah_hat_flags;
968 
969 	impl_acc_hdl_free(handle);
970 
971 	/*
972 	 * check for end result.
973 	 */
974 	if (result != DDI_SUCCESS)
975 		return (-1);
976 	return (0);
977 }
978 
979 
980 /*
981  * Property functions:	 See also, ddipropdefs.h.
982  *
983  * These functions are the framework for the property functions,
984  * i.e. they support software defined properties.  All implementation
985  * specific property handling (i.e.: self-identifying devices and
986  * PROM defined properties are handled in the implementation specific
987  * functions (defined in ddi_implfuncs.h).
988  */
989 
990 /*
991  * nopropop:	Shouldn't be called, right?
992  */
993 int
nopropop(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp)994 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
995     char *name, caddr_t valuep, int *lengthp)
996 {
997 	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
998 	return (DDI_PROP_NOT_FOUND);
999 }
1000 
1001 #ifdef	DDI_PROP_DEBUG
1002 int ddi_prop_debug_flag = 0;
1003 
1004 int
ddi_prop_debug(int enable)1005 ddi_prop_debug(int enable)
1006 {
1007 	int prev = ddi_prop_debug_flag;
1008 
1009 	if ((enable != 0) || (prev != 0))
1010 		printf("ddi_prop_debug: debugging %s\n",
1011 		    enable ? "enabled" : "disabled");
1012 	ddi_prop_debug_flag = enable;
1013 	return (prev);
1014 }
1015 
1016 #endif	/* DDI_PROP_DEBUG */
1017 
1018 /*
1019  * Search a property list for a match, if found return pointer
1020  * to matching prop struct, else return NULL.
1021  */
1022 
1023 ddi_prop_t *
i_ddi_prop_search(dev_t dev,char * name,uint_t flags,ddi_prop_t ** list_head)1024 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1025 {
1026 	ddi_prop_t	*propp;
1027 
1028 	/*
1029 	 * find the property in child's devinfo:
1030 	 * Search order defined by this search function is first matching
1031 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1032 	 * dev == propp->prop_dev, name == propp->name, and the correct
1033 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1034 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1035 	 */
1036 	if (dev == DDI_DEV_T_NONE)
1037 		dev = DDI_DEV_T_ANY;
1038 
1039 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1040 
1041 		if (!DDI_STRSAME(propp->prop_name, name))
1042 			continue;
1043 
1044 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1045 			continue;
1046 
1047 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1048 			continue;
1049 
1050 		return (propp);
1051 	}
1052 
1053 	return ((ddi_prop_t *)0);
1054 }
1055 
1056 /*
1057  * Search for property within devnames structures
1058  */
1059 ddi_prop_t *
i_ddi_search_global_prop(dev_t dev,char * name,uint_t flags)1060 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1061 {
1062 	major_t		major;
1063 	struct devnames	*dnp;
1064 	ddi_prop_t	*propp;
1065 
1066 	/*
1067 	 * Valid dev_t value is needed to index into the
1068 	 * correct devnames entry, therefore a dev_t
1069 	 * value of DDI_DEV_T_ANY is not appropriate.
1070 	 */
1071 	ASSERT(dev != DDI_DEV_T_ANY);
1072 	if (dev == DDI_DEV_T_ANY) {
1073 		return ((ddi_prop_t *)0);
1074 	}
1075 
1076 	major = getmajor(dev);
1077 	dnp = &(devnamesp[major]);
1078 
1079 	if (dnp->dn_global_prop_ptr == NULL)
1080 		return ((ddi_prop_t *)0);
1081 
1082 	LOCK_DEV_OPS(&dnp->dn_lock);
1083 
1084 	for (propp = dnp->dn_global_prop_ptr->prop_list;
1085 	    propp != NULL;
1086 	    propp = (ddi_prop_t *)propp->prop_next) {
1087 
1088 		if (!DDI_STRSAME(propp->prop_name, name))
1089 			continue;
1090 
1091 		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
1092 		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1093 			continue;
1094 
1095 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1096 			continue;
1097 
1098 		/* Property found, return it */
1099 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1100 		return (propp);
1101 	}
1102 
1103 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1104 	return ((ddi_prop_t *)0);
1105 }
1106 
1107 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1108 
1109 /*
1110  * ddi_prop_search_global:
1111  *	Search the global property list within devnames
1112  *	for the named property.  Return the encoded value.
1113  */
1114 static int
i_ddi_prop_search_global(dev_t dev,uint_t flags,char * name,void * valuep,uint_t * lengthp)1115 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1116     void *valuep, uint_t *lengthp)
1117 {
1118 	ddi_prop_t	*propp;
1119 	caddr_t		buffer;
1120 
1121 	propp =  i_ddi_search_global_prop(dev, name, flags);
1122 
1123 	/* Property NOT found, bail */
1124 	if (propp == (ddi_prop_t *)0)
1125 		return (DDI_PROP_NOT_FOUND);
1126 
1127 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1128 		return (DDI_PROP_UNDEFINED);
1129 
1130 	if ((buffer = kmem_alloc(propp->prop_len,
1131 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1132 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1133 		return (DDI_PROP_NO_MEMORY);
1134 	}
1135 
1136 	/*
1137 	 * Return the encoded data
1138 	 */
1139 	*(caddr_t *)valuep = buffer;
1140 	*lengthp = propp->prop_len;
1141 	bcopy(propp->prop_val, buffer, propp->prop_len);
1142 
1143 	return (DDI_PROP_SUCCESS);
1144 }
1145 
1146 /*
1147  * ddi_prop_search_common:	Lookup and return the encoded value
1148  */
1149 int
ddi_prop_search_common(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,uint_t flags,char * name,void * valuep,uint_t * lengthp)1150 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1151     uint_t flags, char *name, void *valuep, uint_t *lengthp)
1152 {
1153 	ddi_prop_t	*propp;
1154 	int		i;
1155 	caddr_t		buffer = NULL;
1156 	caddr_t		prealloc = NULL;
1157 	int		plength = 0;
1158 	dev_info_t	*pdip;
1159 	int		(*bop)();
1160 
1161 	/*CONSTANTCONDITION*/
1162 	while (1)  {
1163 
1164 		mutex_enter(&(DEVI(dip)->devi_lock));
1165 
1166 
1167 		/*
1168 		 * find the property in child's devinfo:
1169 		 * Search order is:
1170 		 *	1. driver defined properties
1171 		 *	2. system defined properties
1172 		 *	3. driver global properties
1173 		 *	4. boot defined properties
1174 		 */
1175 
1176 		propp = i_ddi_prop_search(dev, name, flags,
1177 		    &(DEVI(dip)->devi_drv_prop_ptr));
1178 		if (propp == NULL)  {
1179 			propp = i_ddi_prop_search(dev, name, flags,
1180 			    &(DEVI(dip)->devi_sys_prop_ptr));
1181 		}
1182 		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
1183 			propp = i_ddi_prop_search(dev, name, flags,
1184 			    &DEVI(dip)->devi_global_prop_list->prop_list);
1185 		}
1186 
1187 		if (propp == NULL)  {
1188 			propp = i_ddi_prop_search(dev, name, flags,
1189 			    &(DEVI(dip)->devi_hw_prop_ptr));
1190 		}
1191 
1192 		/*
1193 		 * Software property found?
1194 		 */
1195 		if (propp != (ddi_prop_t *)0)	{
1196 
1197 			/*
1198 			 * If explicit undefine, return now.
1199 			 */
1200 			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
1201 				mutex_exit(&(DEVI(dip)->devi_lock));
1202 				if (prealloc)
1203 					kmem_free(prealloc, plength);
1204 				return (DDI_PROP_UNDEFINED);
1205 			}
1206 
1207 			/*
1208 			 * If we only want to know if it exists, return now
1209 			 */
1210 			if (prop_op == PROP_EXISTS) {
1211 				mutex_exit(&(DEVI(dip)->devi_lock));
1212 				ASSERT(prealloc == NULL);
1213 				return (DDI_PROP_SUCCESS);
1214 			}
1215 
1216 			/*
1217 			 * If length only request or prop length == 0,
1218 			 * service request and return now.
1219 			 */
1220 			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
1221 				*lengthp = propp->prop_len;
1222 
1223 				/*
1224 				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1225 				 * that means prop_len is 0, so set valuep
1226 				 * also to NULL
1227 				 */
1228 				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
1229 					*(caddr_t *)valuep = NULL;
1230 
1231 				mutex_exit(&(DEVI(dip)->devi_lock));
1232 				if (prealloc)
1233 					kmem_free(prealloc, plength);
1234 				return (DDI_PROP_SUCCESS);
1235 			}
1236 
1237 			/*
1238 			 * If LEN_AND_VAL_ALLOC and the request can sleep,
1239 			 * drop the mutex, allocate the buffer, and go
1240 			 * through the loop again.  If we already allocated
1241 			 * the buffer, and the size of the property changed,
1242 			 * keep trying...
1243 			 */
1244 			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
1245 			    (flags & DDI_PROP_CANSLEEP))  {
1246 				if (prealloc && (propp->prop_len != plength)) {
1247 					kmem_free(prealloc, plength);
1248 					prealloc = NULL;
1249 				}
1250 				if (prealloc == NULL)  {
1251 					plength = propp->prop_len;
1252 					mutex_exit(&(DEVI(dip)->devi_lock));
1253 					prealloc = kmem_alloc(plength,
1254 					    KM_SLEEP);
1255 					continue;
1256 				}
1257 			}
1258 
1259 			/*
1260 			 * Allocate buffer, if required.  Either way,
1261 			 * set `buffer' variable.
1262 			 */
1263 			i = *lengthp;			/* Get callers length */
1264 			*lengthp = propp->prop_len;	/* Set callers length */
1265 
1266 			switch (prop_op) {
1267 
1268 			case PROP_LEN_AND_VAL_ALLOC:
1269 
1270 				if (prealloc == NULL) {
1271 					buffer = kmem_alloc(propp->prop_len,
1272 					    KM_NOSLEEP);
1273 				} else {
1274 					buffer = prealloc;
1275 				}
1276 
1277 				if (buffer == NULL)  {
1278 					mutex_exit(&(DEVI(dip)->devi_lock));
1279 					cmn_err(CE_CONT, prop_no_mem_msg, name);
1280 					return (DDI_PROP_NO_MEMORY);
1281 				}
1282 				/* Set callers buf ptr */
1283 				*(caddr_t *)valuep = buffer;
1284 				break;
1285 
1286 			case PROP_LEN_AND_VAL_BUF:
1287 
1288 				if (propp->prop_len > (i)) {
1289 					mutex_exit(&(DEVI(dip)->devi_lock));
1290 					return (DDI_PROP_BUF_TOO_SMALL);
1291 				}
1292 
1293 				buffer = valuep;  /* Get callers buf ptr */
1294 				break;
1295 
1296 			default:
1297 				break;
1298 			}
1299 
1300 			/*
1301 			 * Do the copy.
1302 			 */
1303 			if (buffer != NULL)
1304 				bcopy(propp->prop_val, buffer, propp->prop_len);
1305 			mutex_exit(&(DEVI(dip)->devi_lock));
1306 			return (DDI_PROP_SUCCESS);
1307 		}
1308 
1309 		mutex_exit(&(DEVI(dip)->devi_lock));
1310 		if (prealloc)
1311 			kmem_free(prealloc, plength);
1312 		prealloc = NULL;
1313 
1314 		/*
1315 		 * Prop not found, call parent bus_ops to deal with possible
1316 		 * h/w layer (possible PROM defined props, etc.) and to
1317 		 * possibly ascend the hierarchy, if allowed by flags.
1318 		 */
1319 		pdip = (dev_info_t *)DEVI(dip)->devi_parent;
1320 
1321 		/*
1322 		 * One last call for the root driver PROM props?
1323 		 */
1324 		if (dip == ddi_root_node())  {
1325 			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
1326 			    flags, name, valuep, (int *)lengthp));
1327 		}
1328 
1329 		/*
1330 		 * We may have been called to check for properties
1331 		 * within a single devinfo node that has no parent -
1332 		 * see make_prop()
1333 		 */
1334 		if (pdip == NULL) {
1335 			ASSERT((flags &
1336 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
1337 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
1338 			return (DDI_PROP_NOT_FOUND);
1339 		}
1340 
1341 		/*
1342 		 * Instead of recursing, we do iterative calls up the tree.
1343 		 * As a bit of optimization, skip the bus_op level if the
1344 		 * node is a s/w node and if the parent's bus_prop_op function
1345 		 * is `ddi_bus_prop_op', because we know that in this case,
1346 		 * this function does nothing.
1347 		 *
1348 		 * 4225415: If the parent isn't attached, or the child
1349 		 * hasn't been named by the parent yet, use the default
1350 		 * ddi_bus_prop_op as a proxy for the parent.  This
1351 		 * allows property lookups in any child/parent state to
1352 		 * include 'prom' and inherited properties, even when
1353 		 * there are no drivers attached to the child or parent.
1354 		 */
1355 
1356 		bop = ddi_bus_prop_op;
1357 		if (i_ddi_devi_attached(pdip) &&
1358 		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
1359 			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;
1360 
1361 		i = DDI_PROP_NOT_FOUND;
1362 
1363 		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
1364 			i = (*bop)(dev, pdip, dip, prop_op,
1365 			    flags | DDI_PROP_DONTPASS,
1366 			    name, valuep, lengthp);
1367 		}
1368 
1369 		if ((flags & DDI_PROP_DONTPASS) ||
1370 		    (i != DDI_PROP_NOT_FOUND))
1371 			return (i);
1372 
1373 		dip = pdip;
1374 	}
1375 	/*NOTREACHED*/
1376 }
1377 
1378 
1379 /*
1380  * ddi_prop_op: The basic property operator for drivers.
1381  *
1382  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1383  *
1384  *	prop_op			valuep
1385  *	------			------
1386  *
1387  *	PROP_LEN		<unused>
1388  *
1389  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1390  *
1391  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1392  *				address of allocated buffer, if successful)
1393  */
1394 int
ddi_prop_op(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp)1395 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1396     char *name, caddr_t valuep, int *lengthp)
1397 {
1398 	int	i;
1399 
1400 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1401 
1402 	/*
1403 	 * If this was originally an LDI prop lookup then we bail here.
1404 	 * The reason is that the LDI property lookup interfaces first call
1405 	 * a drivers prop_op() entry point to allow it to override
1406 	 * properties.  But if we've made it here, then the driver hasn't
1407 	 * overriden any properties.  We don't want to continue with the
1408 	 * property search here because we don't have any type inforamtion.
1409 	 * When we return failure, the LDI interfaces will then proceed to
1410 	 * call the typed property interfaces to look up the property.
1411 	 */
1412 	if (mod_flags & DDI_PROP_DYNAMIC)
1413 		return (DDI_PROP_NOT_FOUND);
1414 
1415 	/*
1416 	 * check for pre-typed property consumer asking for typed property:
1417 	 * see e_ddi_getprop_int64.
1418 	 */
1419 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1420 		mod_flags |= DDI_PROP_TYPE_INT64;
1421 	mod_flags |= DDI_PROP_TYPE_ANY;
1422 
1423 	i = ddi_prop_search_common(dev, dip, prop_op,
1424 	    mod_flags, name, valuep, (uint_t *)lengthp);
1425 	if (i == DDI_PROP_FOUND_1275)
1426 		return (DDI_PROP_SUCCESS);
1427 	return (i);
1428 }
1429 
1430 /*
1431  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1432  * maintain size in number of blksize blocks.  Provides a dynamic property
1433  * implementation for size oriented properties based on nblocks64 and blksize
1434  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1435  * is too large.  This interface should not be used with a nblocks64 that
1436  * represents the driver's idea of how to represent unknown, if nblocks is
1437  * unknown use ddi_prop_op.
1438  */
1439 int
ddi_prop_op_nblocks_blksize(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp,uint64_t nblocks64,uint_t blksize)1440 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1441     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1442     uint64_t nblocks64, uint_t blksize)
1443 {
1444 	uint64_t size64;
1445 	int	blkshift;
1446 
1447 	/* convert block size to shift value */
1448 	ASSERT(BIT_ONLYONESET(blksize));
1449 	blkshift = highbit(blksize) - 1;
1450 
1451 	/*
1452 	 * There is no point in supporting nblocks64 values that don't have
1453 	 * an accurate uint64_t byte count representation.
1454 	 */
1455 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1456 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1457 		    name, valuep, lengthp));
1458 
1459 	size64 = nblocks64 << blkshift;
1460 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1461 	    name, valuep, lengthp, size64, blksize));
1462 }
1463 
1464 /*
1465  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1466  */
1467 int
ddi_prop_op_nblocks(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp,uint64_t nblocks64)1468 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1469     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1470 {
1471 	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1472 	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1473 }
1474 
1475 /*
1476  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1477  * maintain size in bytes. Provides a of dynamic property implementation for
1478  * size oriented properties based on size64 value and blksize passed in by the
1479  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1480  * should not be used with a size64 that represents the driver's idea of how
1481  * to represent unknown, if size is unknown use ddi_prop_op.
1482  *
1483  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1484  * integers. While the most likely interface to request them ([bc]devi_size)
1485  * is declared int (signed) there is no enforcement of this, which means we
1486  * can't enforce limitations here without risking regression.
1487  */
1488 int
ddi_prop_op_size_blksize(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp,uint64_t size64,uint_t blksize)1489 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1490     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
1491     uint_t blksize)
1492 {
1493 	uint64_t nblocks64;
1494 	int	callers_length;
1495 	caddr_t	buffer;
1496 	int	blkshift;
1497 
1498 	/*
1499 	 * This is a kludge to support capture of size(9P) pure dynamic
1500 	 * properties in snapshots for non-cmlb code (without exposing
1501 	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1502 	 * should be removed.
1503 	 */
1504 	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
1505 		static i_ddi_prop_dyn_t prop_dyn_size[] = {
1506 		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
1507 		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
1508 		    {NULL}
1509 		};
1510 		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
1511 	}
1512 
1513 	/* convert block size to shift value */
1514 	ASSERT(BIT_ONLYONESET(blksize));
1515 	blkshift = highbit(blksize) - 1;
1516 
1517 	/* compute DEV_BSIZE nblocks value */
1518 	nblocks64 = size64 >> blkshift;
1519 
1520 	/* get callers length, establish length of our dynamic properties */
1521 	callers_length = *lengthp;
1522 
1523 	if (strcmp(name, "Nblocks") == 0)
1524 		*lengthp = sizeof (uint64_t);
1525 	else if (strcmp(name, "Size") == 0)
1526 		*lengthp = sizeof (uint64_t);
1527 	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1528 		*lengthp = sizeof (uint32_t);
1529 	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1530 		*lengthp = sizeof (uint32_t);
1531 	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
1532 		*lengthp = sizeof (uint32_t);
1533 	else {
1534 		/* fallback to ddi_prop_op */
1535 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1536 		    name, valuep, lengthp));
1537 	}
1538 
1539 	/* service request for the length of the property */
1540 	if (prop_op == PROP_LEN)
1541 		return (DDI_PROP_SUCCESS);
1542 
1543 	switch (prop_op) {
1544 	case PROP_LEN_AND_VAL_ALLOC:
1545 		if ((buffer = kmem_alloc(*lengthp,
1546 		    (mod_flags & DDI_PROP_CANSLEEP) ?
1547 		    KM_SLEEP : KM_NOSLEEP)) == NULL)
1548 			return (DDI_PROP_NO_MEMORY);
1549 
1550 		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
1551 		break;
1552 
1553 	case PROP_LEN_AND_VAL_BUF:
1554 		/* the length of the property and the request must match */
1555 		if (callers_length != *lengthp)
1556 			return (DDI_PROP_INVAL_ARG);
1557 
1558 		buffer = valuep;		/* get callers buf ptr */
1559 		break;
1560 
1561 	default:
1562 		return (DDI_PROP_INVAL_ARG);
1563 	}
1564 
1565 	/* transfer the value into the buffer */
1566 	if (strcmp(name, "Nblocks") == 0)
1567 		*((uint64_t *)buffer) = nblocks64;
1568 	else if (strcmp(name, "Size") == 0)
1569 		*((uint64_t *)buffer) = size64;
1570 	else if (strcmp(name, "nblocks") == 0)
1571 		*((uint32_t *)buffer) = (uint32_t)nblocks64;
1572 	else if (strcmp(name, "size") == 0)
1573 		*((uint32_t *)buffer) = (uint32_t)size64;
1574 	else if (strcmp(name, "blksize") == 0)
1575 		*((uint32_t *)buffer) = (uint32_t)blksize;
1576 	return (DDI_PROP_SUCCESS);
1577 }
1578 
1579 /*
1580  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1581  */
1582 int
ddi_prop_op_size(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp,uint64_t size64)1583 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1584     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1585 {
1586 	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1587 	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1588 }
1589 
1590 /*
1591  * Variable length props...
1592  */
1593 
1594 /*
1595  * ddi_getlongprop:	Get variable length property len+val into a buffer
1596  *		allocated by property provider via kmem_alloc. Requester
1597  *		is responsible for freeing returned property via kmem_free.
1598  *
1599  *	Arguments:
1600  *
1601  *	dev_t:	Input:	dev_t of property.
1602  *	dip:	Input:	dev_info_t pointer of child.
1603  *	flags:	Input:	Possible flag modifiers are:
1604  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1605  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1606  *	name:	Input:	name of property.
1607  *	valuep:	Output:	Addr of callers buffer pointer.
1608  *	lengthp:Output:	*lengthp will contain prop length on exit.
1609  *
1610  *	Possible Returns:
1611  *
1612  *		DDI_PROP_SUCCESS:	Prop found and returned.
1613  *		DDI_PROP_NOT_FOUND:	Prop not found
1614  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1615  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1616  */
1617 
1618 int
ddi_getlongprop(dev_t dev,dev_info_t * dip,int flags,char * name,caddr_t valuep,int * lengthp)1619 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
1620     char *name, caddr_t valuep, int *lengthp)
1621 {
1622 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
1623 	    flags, name, valuep, lengthp));
1624 }
1625 
1626 /*
1627  *
1628  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
1629  *				buffer. (no memory allocation by provider).
1630  *
1631  *	dev_t:	Input:	dev_t of property.
1632  *	dip:	Input:	dev_info_t pointer of child.
1633  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
1634  *	name:	Input:	name of property
1635  *	valuep:	Input:	ptr to callers buffer.
1636  *	lengthp:I/O:	ptr to length of callers buffer on entry,
1637  *			actual length of property on exit.
1638  *
1639  *	Possible returns:
1640  *
1641  *		DDI_PROP_SUCCESS	Prop found and returned
1642  *		DDI_PROP_NOT_FOUND	Prop not found
1643  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
1644  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
1645  *					no value returned, but actual prop
1646  *					length returned in *lengthp
1647  *
1648  */
1649 
1650 int
ddi_getlongprop_buf(dev_t dev,dev_info_t * dip,int flags,char * name,caddr_t valuep,int * lengthp)1651 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
1652     char *name, caddr_t valuep, int *lengthp)
1653 {
1654 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1655 	    flags, name, valuep, lengthp));
1656 }
1657 
1658 /*
1659  * Integer/boolean sized props.
1660  *
1661  * Call is value only... returns found boolean or int sized prop value or
1662  * defvalue if prop not found or is wrong length or is explicitly undefined.
1663  * Only flag is DDI_PROP_DONTPASS...
1664  *
1665  * By convention, this interface returns boolean (0) sized properties
1666  * as value (int)1.
1667  *
1668  * This never returns an error, if property not found or specifically
1669  * undefined, the input `defvalue' is returned.
1670  */
1671 
1672 int
ddi_getprop(dev_t dev,dev_info_t * dip,int flags,char * name,int defvalue)1673 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1674 {
1675 	int	propvalue = defvalue;
1676 	int	proplength = sizeof (int);
1677 	int	error;
1678 
1679 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1680 	    flags, name, (caddr_t)&propvalue, &proplength);
1681 
1682 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1683 		propvalue = 1;
1684 
1685 	return (propvalue);
1686 }
1687 
1688 /*
1689  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1690  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1691  */
1692 
1693 int
ddi_getproplen(dev_t dev,dev_info_t * dip,int flags,char * name,int * lengthp)1694 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
1695 {
1696 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
1697 }
1698 
1699 /*
1700  * Allocate a struct prop_driver_data, along with 'size' bytes
1701  * for decoded property data.  This structure is freed by
1702  * calling ddi_prop_free(9F).
1703  */
1704 static void *
ddi_prop_decode_alloc(size_t size,void (* prop_free)(struct prop_driver_data *))1705 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1706 {
1707 	struct prop_driver_data *pdd;
1708 
1709 	/*
1710 	 * Allocate a structure with enough memory to store the decoded data.
1711 	 */
1712 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1713 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1714 	pdd->pdd_prop_free = prop_free;
1715 
1716 	/*
1717 	 * Return a pointer to the location to put the decoded data.
1718 	 */
1719 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1720 }
1721 
1722 /*
1723  * Allocated the memory needed to store the encoded data in the property
1724  * handle.
1725  */
1726 static int
ddi_prop_encode_alloc(prop_handle_t * ph,size_t size)1727 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1728 {
1729 	/*
1730 	 * If size is zero, then set data to NULL and size to 0.  This
1731 	 * is a boolean property.
1732 	 */
1733 	if (size == 0) {
1734 		ph->ph_size = 0;
1735 		ph->ph_data = NULL;
1736 		ph->ph_cur_pos = NULL;
1737 		ph->ph_save_pos = NULL;
1738 	} else {
1739 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1740 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1741 			if (ph->ph_data == NULL)
1742 				return (DDI_PROP_NO_MEMORY);
1743 		} else
1744 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1745 		ph->ph_size = size;
1746 		ph->ph_cur_pos = ph->ph_data;
1747 		ph->ph_save_pos = ph->ph_data;
1748 	}
1749 	return (DDI_PROP_SUCCESS);
1750 }
1751 
1752 /*
1753  * Free the space allocated by the lookup routines.  Each lookup routine
1754  * returns a pointer to the decoded data to the driver.  The driver then
1755  * passes this pointer back to us.  This data actually lives in a struct
1756  * prop_driver_data.  We use negative indexing to find the beginning of
1757  * the structure and then free the entire structure using the size and
1758  * the free routine stored in the structure.
1759  */
1760 void
ddi_prop_free(void * datap)1761 ddi_prop_free(void *datap)
1762 {
1763 	struct prop_driver_data *pdd;
1764 
1765 	/*
1766 	 * Get the structure
1767 	 */
1768 	pdd = (struct prop_driver_data *)
1769 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
1770 	/*
1771 	 * Call the free routine to free it
1772 	 */
1773 	(*pdd->pdd_prop_free)(pdd);
1774 }
1775 
1776 /*
1777  * Free the data associated with an array of ints,
1778  * allocated with ddi_prop_decode_alloc().
1779  */
1780 static void
ddi_prop_free_ints(struct prop_driver_data * pdd)1781 ddi_prop_free_ints(struct prop_driver_data *pdd)
1782 {
1783 	kmem_free(pdd, pdd->pdd_size);
1784 }
1785 
1786 /*
1787  * Free a single string property or a single string contained within
1788  * the argv style return value of an array of strings.
1789  */
1790 static void
ddi_prop_free_string(struct prop_driver_data * pdd)1791 ddi_prop_free_string(struct prop_driver_data *pdd)
1792 {
1793 	kmem_free(pdd, pdd->pdd_size);
1794 
1795 }
1796 
1797 /*
1798  * Free an array of strings.
1799  */
1800 static void
ddi_prop_free_strings(struct prop_driver_data * pdd)1801 ddi_prop_free_strings(struct prop_driver_data *pdd)
1802 {
1803 	kmem_free(pdd, pdd->pdd_size);
1804 }
1805 
1806 /*
1807  * Free the data associated with an array of bytes.
1808  */
1809 static void
ddi_prop_free_bytes(struct prop_driver_data * pdd)1810 ddi_prop_free_bytes(struct prop_driver_data *pdd)
1811 {
1812 	kmem_free(pdd, pdd->pdd_size);
1813 }
1814 
1815 /*
1816  * Reset the current location pointer in the property handle to the
1817  * beginning of the data.
1818  */
1819 void
ddi_prop_reset_pos(prop_handle_t * ph)1820 ddi_prop_reset_pos(prop_handle_t *ph)
1821 {
1822 	ph->ph_cur_pos = ph->ph_data;
1823 	ph->ph_save_pos = ph->ph_data;
1824 }
1825 
1826 /*
1827  * Restore the current location pointer in the property handle to the
1828  * saved position.
1829  */
1830 void
ddi_prop_save_pos(prop_handle_t * ph)1831 ddi_prop_save_pos(prop_handle_t *ph)
1832 {
1833 	ph->ph_save_pos = ph->ph_cur_pos;
1834 }
1835 
1836 /*
1837  * Save the location that the current location pointer is pointing to..
1838  */
1839 void
ddi_prop_restore_pos(prop_handle_t * ph)1840 ddi_prop_restore_pos(prop_handle_t *ph)
1841 {
1842 	ph->ph_cur_pos = ph->ph_save_pos;
1843 }
1844 
1845 /*
1846  * Property encode/decode functions
1847  */
1848 
1849 /*
1850  * Decode a single integer property
1851  */
1852 static int
ddi_prop_fm_decode_int(prop_handle_t * ph,void * data,uint_t * nelements)1853 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1854 {
1855 	int	i;
1856 	int	tmp;
1857 
1858 	/*
1859 	 * If there is nothing to decode return an error
1860 	 */
1861 	if (ph->ph_size == 0)
1862 		return (DDI_PROP_END_OF_DATA);
1863 
1864 	/*
1865 	 * Decode the property as a single integer and return it
1866 	 * in data if we were able to decode it.
1867 	 */
1868 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1869 	if (i < DDI_PROP_RESULT_OK) {
1870 		switch (i) {
1871 		case DDI_PROP_RESULT_EOF:
1872 			return (DDI_PROP_END_OF_DATA);
1873 
1874 		case DDI_PROP_RESULT_ERROR:
1875 			return (DDI_PROP_CANNOT_DECODE);
1876 		}
1877 	}
1878 
1879 	*(int *)data = tmp;
1880 	*nelements = 1;
1881 	return (DDI_PROP_SUCCESS);
1882 }
1883 
1884 /*
1885  * Decode a single 64 bit integer property
1886  */
1887 static int
ddi_prop_fm_decode_int64(prop_handle_t * ph,void * data,uint_t * nelements)1888 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1889 {
1890 	int	i;
1891 	int64_t	tmp;
1892 
1893 	/*
1894 	 * If there is nothing to decode return an error
1895 	 */
1896 	if (ph->ph_size == 0)
1897 		return (DDI_PROP_END_OF_DATA);
1898 
1899 	/*
1900 	 * Decode the property as a single integer and return it
1901 	 * in data if we were able to decode it.
1902 	 */
1903 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1904 	if (i < DDI_PROP_RESULT_OK) {
1905 		switch (i) {
1906 		case DDI_PROP_RESULT_EOF:
1907 			return (DDI_PROP_END_OF_DATA);
1908 
1909 		case DDI_PROP_RESULT_ERROR:
1910 			return (DDI_PROP_CANNOT_DECODE);
1911 		}
1912 	}
1913 
1914 	*(int64_t *)data = tmp;
1915 	*nelements = 1;
1916 	return (DDI_PROP_SUCCESS);
1917 }
1918 
1919 /*
1920  * Decode an array of integers property
1921  */
1922 static int
ddi_prop_fm_decode_ints(prop_handle_t * ph,void * data,uint_t * nelements)1923 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
1924 {
1925 	int	i;
1926 	int	cnt = 0;
1927 	int	*tmp;
1928 	int	*intp;
1929 	int	n;
1930 
1931 	/*
1932 	 * Figure out how many array elements there are by going through the
1933 	 * data without decoding it first and counting.
1934 	 */
1935 	for (;;) {
1936 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
1937 		if (i < 0)
1938 			break;
1939 		cnt++;
1940 	}
1941 
1942 	/*
1943 	 * If there are no elements return an error
1944 	 */
1945 	if (cnt == 0)
1946 		return (DDI_PROP_END_OF_DATA);
1947 
1948 	/*
1949 	 * If we cannot skip through the data, we cannot decode it
1950 	 */
1951 	if (i == DDI_PROP_RESULT_ERROR)
1952 		return (DDI_PROP_CANNOT_DECODE);
1953 
1954 	/*
1955 	 * Reset the data pointer to the beginning of the encoded data
1956 	 */
1957 	ddi_prop_reset_pos(ph);
1958 
1959 	/*
1960 	 * Allocated memory to store the decoded value in.
1961 	 */
1962 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
1963 	    ddi_prop_free_ints);
1964 
1965 	/*
1966 	 * Decode each element and place it in the space we just allocated
1967 	 */
1968 	tmp = intp;
1969 	for (n = 0; n < cnt; n++, tmp++) {
1970 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
1971 		if (i < DDI_PROP_RESULT_OK) {
1972 			/*
1973 			 * Free the space we just allocated
1974 			 * and return an error.
1975 			 */
1976 			ddi_prop_free(intp);
1977 			switch (i) {
1978 			case DDI_PROP_RESULT_EOF:
1979 				return (DDI_PROP_END_OF_DATA);
1980 
1981 			case DDI_PROP_RESULT_ERROR:
1982 				return (DDI_PROP_CANNOT_DECODE);
1983 			}
1984 		}
1985 	}
1986 
1987 	*nelements = cnt;
1988 	*(int **)data = intp;
1989 
1990 	return (DDI_PROP_SUCCESS);
1991 }
1992 
1993 /*
1994  * Decode a 64 bit integer array property
1995  */
1996 static int
ddi_prop_fm_decode_int64_array(prop_handle_t * ph,void * data,uint_t * nelements)1997 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
1998 {
1999 	int	i;
2000 	int	n;
2001 	int	cnt = 0;
2002 	int64_t	*tmp;
2003 	int64_t	*intp;
2004 
2005 	/*
2006 	 * Count the number of array elements by going
2007 	 * through the data without decoding it.
2008 	 */
2009 	for (;;) {
2010 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2011 		if (i < 0)
2012 			break;
2013 		cnt++;
2014 	}
2015 
2016 	/*
2017 	 * If there are no elements return an error
2018 	 */
2019 	if (cnt == 0)
2020 		return (DDI_PROP_END_OF_DATA);
2021 
2022 	/*
2023 	 * If we cannot skip through the data, we cannot decode it
2024 	 */
2025 	if (i == DDI_PROP_RESULT_ERROR)
2026 		return (DDI_PROP_CANNOT_DECODE);
2027 
2028 	/*
2029 	 * Reset the data pointer to the beginning of the encoded data
2030 	 */
2031 	ddi_prop_reset_pos(ph);
2032 
2033 	/*
2034 	 * Allocate memory to store the decoded value.
2035 	 */
2036 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2037 	    ddi_prop_free_ints);
2038 
2039 	/*
2040 	 * Decode each element and place it in the space allocated
2041 	 */
2042 	tmp = intp;
2043 	for (n = 0; n < cnt; n++, tmp++) {
2044 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2045 		if (i < DDI_PROP_RESULT_OK) {
2046 			/*
2047 			 * Free the space we just allocated
2048 			 * and return an error.
2049 			 */
2050 			ddi_prop_free(intp);
2051 			switch (i) {
2052 			case DDI_PROP_RESULT_EOF:
2053 				return (DDI_PROP_END_OF_DATA);
2054 
2055 			case DDI_PROP_RESULT_ERROR:
2056 				return (DDI_PROP_CANNOT_DECODE);
2057 			}
2058 		}
2059 	}
2060 
2061 	*nelements = cnt;
2062 	*(int64_t **)data = intp;
2063 
2064 	return (DDI_PROP_SUCCESS);
2065 }
2066 
2067 /*
2068  * Encode an array of integers property (Can be one element)
2069  */
2070 int
ddi_prop_fm_encode_ints(prop_handle_t * ph,void * data,uint_t nelements)2071 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2072 {
2073 	int	i;
2074 	int	*tmp;
2075 	int	cnt;
2076 	int	size;
2077 
2078 	/*
2079 	 * If there is no data, we cannot do anything
2080 	 */
2081 	if (nelements == 0)
2082 		return (DDI_PROP_CANNOT_ENCODE);
2083 
2084 	/*
2085 	 * Get the size of an encoded int.
2086 	 */
2087 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2088 
2089 	if (size < DDI_PROP_RESULT_OK) {
2090 		switch (size) {
2091 		case DDI_PROP_RESULT_EOF:
2092 			return (DDI_PROP_END_OF_DATA);
2093 
2094 		case DDI_PROP_RESULT_ERROR:
2095 			return (DDI_PROP_CANNOT_ENCODE);
2096 		}
2097 	}
2098 
2099 	/*
2100 	 * Allocate space in the handle to store the encoded int.
2101 	 */
2102 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2103 	    DDI_PROP_SUCCESS)
2104 		return (DDI_PROP_NO_MEMORY);
2105 
2106 	/*
2107 	 * Encode the array of ints.
2108 	 */
2109 	tmp = (int *)data;
2110 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2111 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2112 		if (i < DDI_PROP_RESULT_OK) {
2113 			switch (i) {
2114 			case DDI_PROP_RESULT_EOF:
2115 				return (DDI_PROP_END_OF_DATA);
2116 
2117 			case DDI_PROP_RESULT_ERROR:
2118 				return (DDI_PROP_CANNOT_ENCODE);
2119 			}
2120 		}
2121 	}
2122 
2123 	return (DDI_PROP_SUCCESS);
2124 }
2125 
2126 
2127 /*
2128  * Encode a 64 bit integer array property
2129  */
2130 int
ddi_prop_fm_encode_int64(prop_handle_t * ph,void * data,uint_t nelements)2131 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2132 {
2133 	int i;
2134 	int cnt;
2135 	int size;
2136 	int64_t *tmp;
2137 
2138 	/*
2139 	 * If there is no data, we cannot do anything
2140 	 */
2141 	if (nelements == 0)
2142 		return (DDI_PROP_CANNOT_ENCODE);
2143 
2144 	/*
2145 	 * Get the size of an encoded 64 bit int.
2146 	 */
2147 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2148 
2149 	if (size < DDI_PROP_RESULT_OK) {
2150 		switch (size) {
2151 		case DDI_PROP_RESULT_EOF:
2152 			return (DDI_PROP_END_OF_DATA);
2153 
2154 		case DDI_PROP_RESULT_ERROR:
2155 			return (DDI_PROP_CANNOT_ENCODE);
2156 		}
2157 	}
2158 
2159 	/*
2160 	 * Allocate space in the handle to store the encoded int.
2161 	 */
2162 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2163 	    DDI_PROP_SUCCESS)
2164 		return (DDI_PROP_NO_MEMORY);
2165 
2166 	/*
2167 	 * Encode the array of ints.
2168 	 */
2169 	tmp = (int64_t *)data;
2170 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2171 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2172 		if (i < DDI_PROP_RESULT_OK) {
2173 			switch (i) {
2174 			case DDI_PROP_RESULT_EOF:
2175 				return (DDI_PROP_END_OF_DATA);
2176 
2177 			case DDI_PROP_RESULT_ERROR:
2178 				return (DDI_PROP_CANNOT_ENCODE);
2179 			}
2180 		}
2181 	}
2182 
2183 	return (DDI_PROP_SUCCESS);
2184 }
2185 
2186 /*
2187  * Decode a single string property
2188  */
2189 static int
ddi_prop_fm_decode_string(prop_handle_t * ph,void * data,uint_t * nelements)2190 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2191 {
2192 	char		*tmp;
2193 	char		*str;
2194 	int		i;
2195 	int		size;
2196 
2197 	/*
2198 	 * If there is nothing to decode return an error
2199 	 */
2200 	if (ph->ph_size == 0)
2201 		return (DDI_PROP_END_OF_DATA);
2202 
2203 	/*
2204 	 * Get the decoded size of the encoded string.
2205 	 */
2206 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2207 	if (size < DDI_PROP_RESULT_OK) {
2208 		switch (size) {
2209 		case DDI_PROP_RESULT_EOF:
2210 			return (DDI_PROP_END_OF_DATA);
2211 
2212 		case DDI_PROP_RESULT_ERROR:
2213 			return (DDI_PROP_CANNOT_DECODE);
2214 		}
2215 	}
2216 
2217 	/*
2218 	 * Allocated memory to store the decoded value in.
2219 	 */
2220 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2221 
2222 	ddi_prop_reset_pos(ph);
2223 
2224 	/*
2225 	 * Decode the str and place it in the space we just allocated
2226 	 */
2227 	tmp = str;
2228 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2229 	if (i < DDI_PROP_RESULT_OK) {
2230 		/*
2231 		 * Free the space we just allocated
2232 		 * and return an error.
2233 		 */
2234 		ddi_prop_free(str);
2235 		switch (i) {
2236 		case DDI_PROP_RESULT_EOF:
2237 			return (DDI_PROP_END_OF_DATA);
2238 
2239 		case DDI_PROP_RESULT_ERROR:
2240 			return (DDI_PROP_CANNOT_DECODE);
2241 		}
2242 	}
2243 
2244 	*(char **)data = str;
2245 	*nelements = 1;
2246 
2247 	return (DDI_PROP_SUCCESS);
2248 }
2249 
2250 /*
2251  * Decode an array of strings.
2252  */
2253 int
ddi_prop_fm_decode_strings(prop_handle_t * ph,void * data,uint_t * nelements)2254 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2255 {
2256 	int		cnt = 0;
2257 	char		**strs;
2258 	char		**tmp;
2259 	char		*ptr;
2260 	int		i;
2261 	int		n;
2262 	int		size;
2263 	size_t		nbytes;
2264 
2265 	/*
2266 	 * Figure out how many array elements there are by going through the
2267 	 * data without decoding it first and counting.
2268 	 */
2269 	for (;;) {
2270 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2271 		if (i < 0)
2272 			break;
2273 		cnt++;
2274 	}
2275 
2276 	/*
2277 	 * If there are no elements return an error
2278 	 */
2279 	if (cnt == 0)
2280 		return (DDI_PROP_END_OF_DATA);
2281 
2282 	/*
2283 	 * If we cannot skip through the data, we cannot decode it
2284 	 */
2285 	if (i == DDI_PROP_RESULT_ERROR)
2286 		return (DDI_PROP_CANNOT_DECODE);
2287 
2288 	/*
2289 	 * Reset the data pointer to the beginning of the encoded data
2290 	 */
2291 	ddi_prop_reset_pos(ph);
2292 
2293 	/*
2294 	 * Figure out how much memory we need for the sum total
2295 	 */
2296 	nbytes = (cnt + 1) * sizeof (char *);
2297 
2298 	for (n = 0; n < cnt; n++) {
2299 		/*
2300 		 * Get the decoded size of the current encoded string.
2301 		 */
2302 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2303 		if (size < DDI_PROP_RESULT_OK) {
2304 			switch (size) {
2305 			case DDI_PROP_RESULT_EOF:
2306 				return (DDI_PROP_END_OF_DATA);
2307 
2308 			case DDI_PROP_RESULT_ERROR:
2309 				return (DDI_PROP_CANNOT_DECODE);
2310 			}
2311 		}
2312 
2313 		nbytes += size;
2314 	}
2315 
2316 	/*
2317 	 * Allocate memory in which to store the decoded strings.
2318 	 */
2319 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2320 
2321 	/*
2322 	 * Set up pointers for each string by figuring out yet
2323 	 * again how long each string is.
2324 	 */
2325 	ddi_prop_reset_pos(ph);
2326 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2327 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2328 		/*
2329 		 * Get the decoded size of the current encoded string.
2330 		 */
2331 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2332 		if (size < DDI_PROP_RESULT_OK) {
2333 			ddi_prop_free(strs);
2334 			switch (size) {
2335 			case DDI_PROP_RESULT_EOF:
2336 				return (DDI_PROP_END_OF_DATA);
2337 
2338 			case DDI_PROP_RESULT_ERROR:
2339 				return (DDI_PROP_CANNOT_DECODE);
2340 			}
2341 		}
2342 
2343 		*tmp = ptr;
2344 		ptr += size;
2345 	}
2346 
2347 	/*
2348 	 * String array is terminated by a NULL
2349 	 */
2350 	*tmp = NULL;
2351 
2352 	/*
2353 	 * Finally, we can decode each string
2354 	 */
2355 	ddi_prop_reset_pos(ph);
2356 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2357 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2358 		if (i < DDI_PROP_RESULT_OK) {
2359 			/*
2360 			 * Free the space we just allocated
2361 			 * and return an error
2362 			 */
2363 			ddi_prop_free(strs);
2364 			switch (i) {
2365 			case DDI_PROP_RESULT_EOF:
2366 				return (DDI_PROP_END_OF_DATA);
2367 
2368 			case DDI_PROP_RESULT_ERROR:
2369 				return (DDI_PROP_CANNOT_DECODE);
2370 			}
2371 		}
2372 	}
2373 
2374 	*(char ***)data = strs;
2375 	*nelements = cnt;
2376 
2377 	return (DDI_PROP_SUCCESS);
2378 }
2379 
2380 /*
2381  * Encode a string.
2382  */
2383 int
ddi_prop_fm_encode_string(prop_handle_t * ph,void * data,uint_t nelements)2384 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2385 {
2386 	char		**tmp;
2387 	int		size;
2388 	int		i;
2389 
2390 	/*
2391 	 * If there is no data, we cannot do anything
2392 	 */
2393 	if (nelements == 0)
2394 		return (DDI_PROP_CANNOT_ENCODE);
2395 
2396 	/*
2397 	 * Get the size of the encoded string.
2398 	 */
2399 	tmp = (char **)data;
2400 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2401 	if (size < DDI_PROP_RESULT_OK) {
2402 		switch (size) {
2403 		case DDI_PROP_RESULT_EOF:
2404 			return (DDI_PROP_END_OF_DATA);
2405 
2406 		case DDI_PROP_RESULT_ERROR:
2407 			return (DDI_PROP_CANNOT_ENCODE);
2408 		}
2409 	}
2410 
2411 	/*
2412 	 * Allocate space in the handle to store the encoded string.
2413 	 */
2414 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2415 		return (DDI_PROP_NO_MEMORY);
2416 
2417 	ddi_prop_reset_pos(ph);
2418 
2419 	/*
2420 	 * Encode the string.
2421 	 */
2422 	tmp = (char **)data;
2423 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2424 	if (i < DDI_PROP_RESULT_OK) {
2425 		switch (i) {
2426 		case DDI_PROP_RESULT_EOF:
2427 			return (DDI_PROP_END_OF_DATA);
2428 
2429 		case DDI_PROP_RESULT_ERROR:
2430 			return (DDI_PROP_CANNOT_ENCODE);
2431 		}
2432 	}
2433 
2434 	return (DDI_PROP_SUCCESS);
2435 }
2436 
2437 
2438 /*
2439  * Encode an array of strings.
2440  */
2441 int
ddi_prop_fm_encode_strings(prop_handle_t * ph,void * data,uint_t nelements)2442 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2443 {
2444 	int		cnt = 0;
2445 	char		**tmp;
2446 	int		size;
2447 	uint_t		total_size;
2448 	int		i;
2449 
2450 	/*
2451 	 * If there is no data, we cannot do anything
2452 	 */
2453 	if (nelements == 0)
2454 		return (DDI_PROP_CANNOT_ENCODE);
2455 
2456 	/*
2457 	 * Get the total size required to encode all the strings.
2458 	 */
2459 	total_size = 0;
2460 	tmp = (char **)data;
2461 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2462 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2463 		if (size < DDI_PROP_RESULT_OK) {
2464 			switch (size) {
2465 			case DDI_PROP_RESULT_EOF:
2466 				return (DDI_PROP_END_OF_DATA);
2467 
2468 			case DDI_PROP_RESULT_ERROR:
2469 				return (DDI_PROP_CANNOT_ENCODE);
2470 			}
2471 		}
2472 		total_size += (uint_t)size;
2473 	}
2474 
2475 	/*
2476 	 * Allocate space in the handle to store the encoded strings.
2477 	 */
2478 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2479 		return (DDI_PROP_NO_MEMORY);
2480 
2481 	ddi_prop_reset_pos(ph);
2482 
2483 	/*
2484 	 * Encode the array of strings.
2485 	 */
2486 	tmp = (char **)data;
2487 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2488 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2489 		if (i < DDI_PROP_RESULT_OK) {
2490 			switch (i) {
2491 			case DDI_PROP_RESULT_EOF:
2492 				return (DDI_PROP_END_OF_DATA);
2493 
2494 			case DDI_PROP_RESULT_ERROR:
2495 				return (DDI_PROP_CANNOT_ENCODE);
2496 			}
2497 		}
2498 	}
2499 
2500 	return (DDI_PROP_SUCCESS);
2501 }
2502 
2503 
2504 /*
2505  * Decode an array of bytes.
2506  */
2507 static int
ddi_prop_fm_decode_bytes(prop_handle_t * ph,void * data,uint_t * nelements)2508 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2509 {
2510 	uchar_t		*tmp;
2511 	int		nbytes;
2512 	int		i;
2513 
2514 	/*
2515 	 * If there are no elements return an error
2516 	 */
2517 	if (ph->ph_size == 0)
2518 		return (DDI_PROP_END_OF_DATA);
2519 
2520 	/*
2521 	 * Get the size of the encoded array of bytes.
2522 	 */
2523 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2524 	    data, ph->ph_size);
2525 	if (nbytes < DDI_PROP_RESULT_OK) {
2526 		switch (nbytes) {
2527 		case DDI_PROP_RESULT_EOF:
2528 			return (DDI_PROP_END_OF_DATA);
2529 
2530 		case DDI_PROP_RESULT_ERROR:
2531 			return (DDI_PROP_CANNOT_DECODE);
2532 		}
2533 	}
2534 
2535 	/*
2536 	 * Allocated memory to store the decoded value in.
2537 	 */
2538 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2539 
2540 	/*
2541 	 * Decode each element and place it in the space we just allocated
2542 	 */
2543 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2544 	if (i < DDI_PROP_RESULT_OK) {
2545 		/*
2546 		 * Free the space we just allocated
2547 		 * and return an error
2548 		 */
2549 		ddi_prop_free(tmp);
2550 		switch (i) {
2551 		case DDI_PROP_RESULT_EOF:
2552 			return (DDI_PROP_END_OF_DATA);
2553 
2554 		case DDI_PROP_RESULT_ERROR:
2555 			return (DDI_PROP_CANNOT_DECODE);
2556 		}
2557 	}
2558 
2559 	*(uchar_t **)data = tmp;
2560 	*nelements = nbytes;
2561 
2562 	return (DDI_PROP_SUCCESS);
2563 }
2564 
2565 /*
2566  * Encode an array of bytes.
2567  */
2568 int
ddi_prop_fm_encode_bytes(prop_handle_t * ph,void * data,uint_t nelements)2569 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2570 {
2571 	int		size;
2572 	int		i;
2573 
2574 	/*
2575 	 * If there are no elements, then this is a boolean property,
2576 	 * so just create a property handle with no data and return.
2577 	 */
2578 	if (nelements == 0) {
2579 		(void) ddi_prop_encode_alloc(ph, 0);
2580 		return (DDI_PROP_SUCCESS);
2581 	}
2582 
2583 	/*
2584 	 * Get the size of the encoded array of bytes.
2585 	 */
2586 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2587 	    nelements);
2588 	if (size < DDI_PROP_RESULT_OK) {
2589 		switch (size) {
2590 		case DDI_PROP_RESULT_EOF:
2591 			return (DDI_PROP_END_OF_DATA);
2592 
2593 		case DDI_PROP_RESULT_ERROR:
2594 			return (DDI_PROP_CANNOT_DECODE);
2595 		}
2596 	}
2597 
2598 	/*
2599 	 * Allocate space in the handle to store the encoded bytes.
2600 	 */
2601 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2602 		return (DDI_PROP_NO_MEMORY);
2603 
2604 	/*
2605 	 * Encode the array of bytes.
2606 	 */
2607 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2608 	    nelements);
2609 	if (i < DDI_PROP_RESULT_OK) {
2610 		switch (i) {
2611 		case DDI_PROP_RESULT_EOF:
2612 			return (DDI_PROP_END_OF_DATA);
2613 
2614 		case DDI_PROP_RESULT_ERROR:
2615 			return (DDI_PROP_CANNOT_ENCODE);
2616 		}
2617 	}
2618 
2619 	return (DDI_PROP_SUCCESS);
2620 }
2621 
2622 /*
2623  * OBP 1275 integer, string and byte operators.
2624  *
2625  * DDI_PROP_CMD_DECODE:
2626  *
2627  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
2628  *	DDI_PROP_RESULT_EOF:		end of data
2629  *	DDI_PROP_OK:			data was decoded
2630  *
2631  * DDI_PROP_CMD_ENCODE:
2632  *
2633  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
2634  *	DDI_PROP_RESULT_EOF:		end of data
2635  *	DDI_PROP_OK:			data was encoded
2636  *
2637  * DDI_PROP_CMD_SKIP:
2638  *
2639  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
2640  *	DDI_PROP_RESULT_EOF:		end of data
2641  *	DDI_PROP_OK:			data was skipped
2642  *
2643  * DDI_PROP_CMD_GET_ESIZE:
2644  *
2645  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
2646  *	DDI_PROP_RESULT_EOF:		end of data
2647  *	> 0:				the encoded size
2648  *
2649  * DDI_PROP_CMD_GET_DSIZE:
2650  *
2651  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
2652  *	DDI_PROP_RESULT_EOF:		end of data
2653  *	> 0:				the decoded size
2654  */
2655 
2656 /*
2657  * OBP 1275 integer operator
2658  *
2659  * OBP properties are a byte stream of data, so integers may not be
2660  * properly aligned.  Therefore we need to copy them one byte at a time.
2661  */
2662 int
ddi_prop_1275_int(prop_handle_t * ph,uint_t cmd,int * data)2663 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
2664 {
2665 	int	i;
2666 
2667 	switch (cmd) {
2668 	case DDI_PROP_CMD_DECODE:
2669 		/*
2670 		 * Check that there is encoded data
2671 		 */
2672 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2673 			return (DDI_PROP_RESULT_ERROR);
2674 		if (ph->ph_flags & PH_FROM_PROM) {
2675 			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
2676 			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2677 			    ph->ph_size - i))
2678 				return (DDI_PROP_RESULT_ERROR);
2679 		} else {
2680 			if (ph->ph_size < sizeof (int) ||
2681 			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2682 			    ph->ph_size - sizeof (int))))
2683 				return (DDI_PROP_RESULT_ERROR);
2684 		}
2685 
2686 		/*
2687 		 * Copy the integer, using the implementation-specific
2688 		 * copy function if the property is coming from the PROM.
2689 		 */
2690 		if (ph->ph_flags & PH_FROM_PROM) {
2691 			*data = impl_ddi_prop_int_from_prom(
2692 			    (uchar_t *)ph->ph_cur_pos,
2693 			    (ph->ph_size < PROP_1275_INT_SIZE) ?
2694 			    ph->ph_size : PROP_1275_INT_SIZE);
2695 		} else {
2696 			bcopy(ph->ph_cur_pos, data, sizeof (int));
2697 		}
2698 
2699 		/*
2700 		 * Move the current location to the start of the next
2701 		 * bit of undecoded data.
2702 		 */
2703 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2704 		    PROP_1275_INT_SIZE;
2705 		return (DDI_PROP_RESULT_OK);
2706 
2707 	case DDI_PROP_CMD_ENCODE:
2708 		/*
2709 		 * Check that there is room to encoded the data
2710 		 */
2711 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2712 		    ph->ph_size < PROP_1275_INT_SIZE ||
2713 		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2714 		    ph->ph_size - sizeof (int))))
2715 			return (DDI_PROP_RESULT_ERROR);
2716 
2717 		/*
2718 		 * Encode the integer into the byte stream one byte at a
2719 		 * time.
2720 		 */
2721 		bcopy(data, ph->ph_cur_pos, sizeof (int));
2722 
2723 		/*
2724 		 * Move the current location to the start of the next bit of
2725 		 * space where we can store encoded data.
2726 		 */
2727 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2728 		return (DDI_PROP_RESULT_OK);
2729 
2730 	case DDI_PROP_CMD_SKIP:
2731 		/*
2732 		 * Check that there is encoded data
2733 		 */
2734 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2735 		    ph->ph_size < PROP_1275_INT_SIZE)
2736 			return (DDI_PROP_RESULT_ERROR);
2737 
2738 
2739 		if ((caddr_t)ph->ph_cur_pos ==
2740 		    (caddr_t)ph->ph_data + ph->ph_size) {
2741 			return (DDI_PROP_RESULT_EOF);
2742 		} else if ((caddr_t)ph->ph_cur_pos >
2743 		    (caddr_t)ph->ph_data + ph->ph_size) {
2744 			return (DDI_PROP_RESULT_EOF);
2745 		}
2746 
2747 		/*
2748 		 * Move the current location to the start of the next bit of
2749 		 * undecoded data.
2750 		 */
2751 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2752 		return (DDI_PROP_RESULT_OK);
2753 
2754 	case DDI_PROP_CMD_GET_ESIZE:
2755 		/*
2756 		 * Return the size of an encoded integer on OBP
2757 		 */
2758 		return (PROP_1275_INT_SIZE);
2759 
2760 	case DDI_PROP_CMD_GET_DSIZE:
2761 		/*
2762 		 * Return the size of a decoded integer on the system.
2763 		 */
2764 		return (sizeof (int));
2765 
2766 	default:
2767 #ifdef DEBUG
2768 		panic("ddi_prop_1275_int: %x impossible", cmd);
2769 		/*NOTREACHED*/
2770 #else
2771 		return (DDI_PROP_RESULT_ERROR);
2772 #endif	/* DEBUG */
2773 	}
2774 }
2775 
2776 /*
2777  * 64 bit integer operator.
2778  *
2779  * This is an extension, defined by Sun, to the 1275 integer
2780  * operator.  This routine handles the encoding/decoding of
2781  * 64 bit integer properties.
2782  */
2783 int
ddi_prop_int64_op(prop_handle_t * ph,uint_t cmd,int64_t * data)2784 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
2785 {
2786 
2787 	switch (cmd) {
2788 	case DDI_PROP_CMD_DECODE:
2789 		/*
2790 		 * Check that there is encoded data
2791 		 */
2792 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2793 			return (DDI_PROP_RESULT_ERROR);
2794 		if (ph->ph_flags & PH_FROM_PROM) {
2795 			return (DDI_PROP_RESULT_ERROR);
2796 		} else {
2797 			if (ph->ph_size < sizeof (int64_t) ||
2798 			    ((int64_t *)ph->ph_cur_pos >
2799 			    ((int64_t *)ph->ph_data +
2800 			    ph->ph_size - sizeof (int64_t))))
2801 				return (DDI_PROP_RESULT_ERROR);
2802 		}
2803 		/*
2804 		 * Copy the integer, using the implementation-specific
2805 		 * copy function if the property is coming from the PROM.
2806 		 */
2807 		if (ph->ph_flags & PH_FROM_PROM) {
2808 			return (DDI_PROP_RESULT_ERROR);
2809 		} else {
2810 			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
2811 		}
2812 
2813 		/*
2814 		 * Move the current location to the start of the next
2815 		 * bit of undecoded data.
2816 		 */
2817 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2818 		    sizeof (int64_t);
2819 		return (DDI_PROP_RESULT_OK);
2820 
2821 	case DDI_PROP_CMD_ENCODE:
2822 		/*
2823 		 * Check that there is room to encoded the data
2824 		 */
2825 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2826 		    ph->ph_size < sizeof (int64_t) ||
2827 		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
2828 		    ph->ph_size - sizeof (int64_t))))
2829 			return (DDI_PROP_RESULT_ERROR);
2830 
2831 		/*
2832 		 * Encode the integer into the byte stream one byte at a
2833 		 * time.
2834 		 */
2835 		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
2836 
2837 		/*
2838 		 * Move the current location to the start of the next bit of
2839 		 * space where we can store encoded data.
2840 		 */
2841 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2842 		    sizeof (int64_t);
2843 		return (DDI_PROP_RESULT_OK);
2844 
2845 	case DDI_PROP_CMD_SKIP:
2846 		/*
2847 		 * Check that there is encoded data
2848 		 */
2849 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2850 		    ph->ph_size < sizeof (int64_t))
2851 			return (DDI_PROP_RESULT_ERROR);
2852 
2853 		if ((caddr_t)ph->ph_cur_pos ==
2854 		    (caddr_t)ph->ph_data + ph->ph_size) {
2855 			return (DDI_PROP_RESULT_EOF);
2856 		} else if ((caddr_t)ph->ph_cur_pos >
2857 		    (caddr_t)ph->ph_data + ph->ph_size) {
2858 			return (DDI_PROP_RESULT_EOF);
2859 		}
2860 
2861 		/*
2862 		 * Move the current location to the start of
2863 		 * the next bit of undecoded data.
2864 		 */
2865 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2866 		    sizeof (int64_t);
2867 		return (DDI_PROP_RESULT_OK);
2868 
2869 	case DDI_PROP_CMD_GET_ESIZE:
2870 		/*
2871 		 * Return the size of an encoded integer on OBP
2872 		 */
2873 		return (sizeof (int64_t));
2874 
2875 	case DDI_PROP_CMD_GET_DSIZE:
2876 		/*
2877 		 * Return the size of a decoded integer on the system.
2878 		 */
2879 		return (sizeof (int64_t));
2880 
2881 	default:
2882 #ifdef DEBUG
2883 		panic("ddi_prop_int64_op: %x impossible", cmd);
2884 		/*NOTREACHED*/
2885 #else
2886 		return (DDI_PROP_RESULT_ERROR);
2887 #endif  /* DEBUG */
2888 	}
2889 }
2890 
2891 /*
2892  * OBP 1275 string operator.
2893  *
2894  * OBP strings are NULL terminated.
2895  */
2896 int
ddi_prop_1275_string(prop_handle_t * ph,uint_t cmd,char * data)2897 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
2898 {
2899 	int	n;
2900 	char	*p;
2901 	char	*end;
2902 
2903 	switch (cmd) {
2904 	case DDI_PROP_CMD_DECODE:
2905 		/*
2906 		 * Check that there is encoded data
2907 		 */
2908 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2909 			return (DDI_PROP_RESULT_ERROR);
2910 		}
2911 
2912 		/*
2913 		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
2914 		 * how to NULL terminate result.
2915 		 */
2916 		p = (char *)ph->ph_cur_pos;
2917 		end = (char *)ph->ph_data + ph->ph_size;
2918 		if (p >= end)
2919 			return (DDI_PROP_RESULT_EOF);
2920 
2921 		while (p < end) {
2922 			*data++ = *p;
2923 			if (*p++ == 0) {	/* NULL from OBP */
2924 				ph->ph_cur_pos = p;
2925 				return (DDI_PROP_RESULT_OK);
2926 			}
2927 		}
2928 
2929 		/*
2930 		 * If OBP did not NULL terminate string, which happens
2931 		 * (at least) for 'true'/'false' boolean values, account for
2932 		 * the space and store null termination on decode.
2933 		 */
2934 		ph->ph_cur_pos = p;
2935 		*data = 0;
2936 		return (DDI_PROP_RESULT_OK);
2937 
2938 	case DDI_PROP_CMD_ENCODE:
2939 		/*
2940 		 * Check that there is room to encoded the data
2941 		 */
2942 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2943 			return (DDI_PROP_RESULT_ERROR);
2944 		}
2945 
2946 		n = strlen(data) + 1;
2947 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
2948 		    ph->ph_size - n)) {
2949 			return (DDI_PROP_RESULT_ERROR);
2950 		}
2951 
2952 		/*
2953 		 * Copy the NULL terminated string
2954 		 */
2955 		bcopy(data, ph->ph_cur_pos, n);
2956 
2957 		/*
2958 		 * Move the current location to the start of the next bit of
2959 		 * space where we can store encoded data.
2960 		 */
2961 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
2962 		return (DDI_PROP_RESULT_OK);
2963 
2964 	case DDI_PROP_CMD_SKIP:
2965 		/*
2966 		 * Check that there is encoded data
2967 		 */
2968 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2969 			return (DDI_PROP_RESULT_ERROR);
2970 		}
2971 
2972 		/*
2973 		 * Return the string length plus one for the NULL
2974 		 * We know the size of the property, we need to
2975 		 * ensure that the string is properly formatted,
2976 		 * since we may be looking up random OBP data.
2977 		 */
2978 		p = (char *)ph->ph_cur_pos;
2979 		end = (char *)ph->ph_data + ph->ph_size;
2980 		if (p >= end)
2981 			return (DDI_PROP_RESULT_EOF);
2982 
2983 		while (p < end) {
2984 			if (*p++ == 0) {	/* NULL from OBP */
2985 				ph->ph_cur_pos = p;
2986 				return (DDI_PROP_RESULT_OK);
2987 			}
2988 		}
2989 
2990 		/*
2991 		 * Accommodate the fact that OBP does not always NULL
2992 		 * terminate strings.
2993 		 */
2994 		ph->ph_cur_pos = p;
2995 		return (DDI_PROP_RESULT_OK);
2996 
2997 	case DDI_PROP_CMD_GET_ESIZE:
2998 		/*
2999 		 * Return the size of the encoded string on OBP.
3000 		 */
3001 		return (strlen(data) + 1);
3002 
3003 	case DDI_PROP_CMD_GET_DSIZE:
3004 		/*
3005 		 * Return the string length plus one for the NULL.
3006 		 * We know the size of the property, we need to
3007 		 * ensure that the string is properly formatted,
3008 		 * since we may be looking up random OBP data.
3009 		 */
3010 		p = (char *)ph->ph_cur_pos;
3011 		end = (char *)ph->ph_data + ph->ph_size;
3012 		if (p >= end)
3013 			return (DDI_PROP_RESULT_EOF);
3014 
3015 		for (n = 0; p < end; n++) {
3016 			if (*p++ == 0) {	/* NULL from OBP */
3017 				ph->ph_cur_pos = p;
3018 				return (n + 1);
3019 			}
3020 		}
3021 
3022 		/*
3023 		 * If OBP did not NULL terminate string, which happens for
3024 		 * 'true'/'false' boolean values, account for the space
3025 		 * to store null termination here.
3026 		 */
3027 		ph->ph_cur_pos = p;
3028 		return (n + 1);
3029 
3030 	default:
3031 #ifdef DEBUG
3032 		panic("ddi_prop_1275_string: %x impossible", cmd);
3033 		/*NOTREACHED*/
3034 #else
3035 		return (DDI_PROP_RESULT_ERROR);
3036 #endif	/* DEBUG */
3037 	}
3038 }
3039 
3040 /*
3041  * OBP 1275 byte operator
3042  *
3043  * Caller must specify the number of bytes to get.  OBP encodes bytes
3044  * as a byte so there is a 1-to-1 translation.
3045  */
3046 int
ddi_prop_1275_bytes(prop_handle_t * ph,uint_t cmd,uchar_t * data,uint_t nelements)3047 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3048     uint_t nelements)
3049 {
3050 	switch (cmd) {
3051 	case DDI_PROP_CMD_DECODE:
3052 		/*
3053 		 * Check that there is encoded data
3054 		 */
3055 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3056 		    ph->ph_size < nelements ||
3057 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3058 		    ph->ph_size - nelements)))
3059 			return (DDI_PROP_RESULT_ERROR);
3060 
3061 		/*
3062 		 * Copy out the bytes
3063 		 */
3064 		bcopy(ph->ph_cur_pos, data, nelements);
3065 
3066 		/*
3067 		 * Move the current location
3068 		 */
3069 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3070 		return (DDI_PROP_RESULT_OK);
3071 
3072 	case DDI_PROP_CMD_ENCODE:
3073 		/*
3074 		 * Check that there is room to encode the data
3075 		 */
3076 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3077 		    ph->ph_size < nelements ||
3078 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3079 		    ph->ph_size - nelements)))
3080 			return (DDI_PROP_RESULT_ERROR);
3081 
3082 		/*
3083 		 * Copy in the bytes
3084 		 */
3085 		bcopy(data, ph->ph_cur_pos, nelements);
3086 
3087 		/*
3088 		 * Move the current location to the start of the next bit of
3089 		 * space where we can store encoded data.
3090 		 */
3091 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3092 		return (DDI_PROP_RESULT_OK);
3093 
3094 	case DDI_PROP_CMD_SKIP:
3095 		/*
3096 		 * Check that there is encoded data
3097 		 */
3098 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3099 		    ph->ph_size < nelements)
3100 			return (DDI_PROP_RESULT_ERROR);
3101 
3102 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3103 		    ph->ph_size - nelements))
3104 			return (DDI_PROP_RESULT_EOF);
3105 
3106 		/*
3107 		 * Move the current location
3108 		 */
3109 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3110 		return (DDI_PROP_RESULT_OK);
3111 
3112 	case DDI_PROP_CMD_GET_ESIZE:
3113 		/*
3114 		 * The size in bytes of the encoded size is the
3115 		 * same as the decoded size provided by the caller.
3116 		 */
3117 		return (nelements);
3118 
3119 	case DDI_PROP_CMD_GET_DSIZE:
3120 		/*
3121 		 * Just return the number of bytes specified by the caller.
3122 		 */
3123 		return (nelements);
3124 
3125 	default:
3126 #ifdef DEBUG
3127 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3128 		/*NOTREACHED*/
3129 #else
3130 		return (DDI_PROP_RESULT_ERROR);
3131 #endif	/* DEBUG */
3132 	}
3133 }
3134 
3135 /*
3136  * Used for properties that come from the OBP, hardware configuration files,
3137  * or that are created by calls to ddi_prop_update(9F).
3138  */
3139 static struct prop_handle_ops prop_1275_ops = {
3140 	ddi_prop_1275_int,
3141 	ddi_prop_1275_string,
3142 	ddi_prop_1275_bytes,
3143 	ddi_prop_int64_op
3144 };
3145 
3146 
3147 /*
3148  * Interface to create/modify a managed property on child's behalf...
3149  * Flags interpreted are:
3150  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3151  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3152  *
3153  * Use same dev_t when modifying or undefining a property.
3154  * Search for properties with DDI_DEV_T_ANY to match first named
3155  * property on the list.
3156  *
3157  * Properties are stored LIFO and subsequently will match the first
3158  * `matching' instance.
3159  */
3160 
3161 /*
3162  * ddi_prop_add:	Add a software defined property
3163  */
3164 
3165 /*
3166  * define to get a new ddi_prop_t.
3167  * km_flags are KM_SLEEP or KM_NOSLEEP.
3168  */
3169 
3170 #define	DDI_NEW_PROP_T(km_flags)	\
3171 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3172 
3173 static int
ddi_prop_add(dev_t dev,dev_info_t * dip,int flags,char * name,caddr_t value,int length)3174 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3175     char *name, caddr_t value, int length)
3176 {
3177 	ddi_prop_t	*new_propp, *propp;
3178 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3179 	int		km_flags = KM_NOSLEEP;
3180 	int		name_buf_len;
3181 
3182 	/*
3183 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3184 	 */
3185 
3186 	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3187 		return (DDI_PROP_INVAL_ARG);
3188 
3189 	if (flags & DDI_PROP_CANSLEEP)
3190 		km_flags = KM_SLEEP;
3191 
3192 	if (flags & DDI_PROP_SYSTEM_DEF)
3193 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3194 	else if (flags & DDI_PROP_HW_DEF)
3195 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3196 
3197 	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3198 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3199 		return (DDI_PROP_NO_MEMORY);
3200 	}
3201 
3202 	/*
3203 	 * If dev is major number 0, then we need to do a ddi_name_to_major
3204 	 * to get the real major number for the device.  This needs to be
3205 	 * done because some drivers need to call ddi_prop_create in their
3206 	 * attach routines but they don't have a dev.  By creating the dev
3207 	 * ourself if the major number is 0, drivers will not have to know what
3208 	 * their major number.	They can just create a dev with major number
3209 	 * 0 and pass it in.  For device 0, we will be doing a little extra
3210 	 * work by recreating the same dev that we already have, but its the
3211 	 * price you pay :-).
3212 	 *
3213 	 * This fixes bug #1098060.
3214 	 */
3215 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3216 		new_propp->prop_dev =
3217 		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3218 		    getminor(dev));
3219 	} else
3220 		new_propp->prop_dev = dev;
3221 
3222 	/*
3223 	 * Allocate space for property name and copy it in...
3224 	 */
3225 
3226 	name_buf_len = strlen(name) + 1;
3227 	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3228 	if (new_propp->prop_name == 0)	{
3229 		kmem_free(new_propp, sizeof (ddi_prop_t));
3230 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3231 		return (DDI_PROP_NO_MEMORY);
3232 	}
3233 	bcopy(name, new_propp->prop_name, name_buf_len);
3234 
3235 	/*
3236 	 * Set the property type
3237 	 */
3238 	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3239 
3240 	/*
3241 	 * Set length and value ONLY if not an explicit property undefine:
3242 	 * NOTE: value and length are zero for explicit undefines.
3243 	 */
3244 
3245 	if (flags & DDI_PROP_UNDEF_IT) {
3246 		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3247 	} else {
3248 		if ((new_propp->prop_len = length) != 0) {
3249 			new_propp->prop_val = kmem_alloc(length, km_flags);
3250 			if (new_propp->prop_val == 0)  {
3251 				kmem_free(new_propp->prop_name, name_buf_len);
3252 				kmem_free(new_propp, sizeof (ddi_prop_t));
3253 				cmn_err(CE_CONT, prop_no_mem_msg, name);
3254 				return (DDI_PROP_NO_MEMORY);
3255 			}
3256 			bcopy(value, new_propp->prop_val, length);
3257 		}
3258 	}
3259 
3260 	/*
3261 	 * Link property into beginning of list. (Properties are LIFO order.)
3262 	 */
3263 
3264 	mutex_enter(&(DEVI(dip)->devi_lock));
3265 	propp = *list_head;
3266 	new_propp->prop_next = propp;
3267 	*list_head = new_propp;
3268 	mutex_exit(&(DEVI(dip)->devi_lock));
3269 	return (DDI_PROP_SUCCESS);
3270 }
3271 
3272 
3273 /*
3274  * ddi_prop_change:	Modify a software managed property value
3275  *
3276  *			Set new length and value if found.
3277  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3278  *			input name is the NULL string.
3279  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3280  *
3281  *			Note: an undef can be modified to be a define,
3282  *			(you can't go the other way.)
3283  */
3284 
3285 static int
ddi_prop_change(dev_t dev,dev_info_t * dip,int flags,char * name,caddr_t value,int length)3286 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3287     char *name, caddr_t value, int length)
3288 {
3289 	ddi_prop_t	*propp;
3290 	ddi_prop_t	**ppropp;
3291 	caddr_t		p = NULL;
3292 
3293 	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3294 		return (DDI_PROP_INVAL_ARG);
3295 
3296 	/*
3297 	 * Preallocate buffer, even if we don't need it...
3298 	 */
3299 	if (length != 0)  {
3300 		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3301 		    KM_SLEEP : KM_NOSLEEP);
3302 		if (p == NULL)	{
3303 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3304 			return (DDI_PROP_NO_MEMORY);
3305 		}
3306 	}
3307 
3308 	/*
3309 	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3310 	 * number, a real dev_t value should be created based upon the dip's
3311 	 * binding driver.  See ddi_prop_add...
3312 	 */
3313 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3314 		dev = makedevice(
3315 		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3316 		    getminor(dev));
3317 
3318 	/*
3319 	 * Check to see if the property exists.  If so we modify it.
3320 	 * Else we create it by calling ddi_prop_add().
3321 	 */
3322 	mutex_enter(&(DEVI(dip)->devi_lock));
3323 	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3324 	if (flags & DDI_PROP_SYSTEM_DEF)
3325 		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3326 	else if (flags & DDI_PROP_HW_DEF)
3327 		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3328 
3329 	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3330 		/*
3331 		 * Need to reallocate buffer?  If so, do it
3332 		 * carefully (reuse same space if new prop
3333 		 * is same size and non-NULL sized).
3334 		 */
3335 		if (length != 0)
3336 			bcopy(value, p, length);
3337 
3338 		if (propp->prop_len != 0)
3339 			kmem_free(propp->prop_val, propp->prop_len);
3340 
3341 		propp->prop_len = length;
3342 		propp->prop_val = p;
3343 		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3344 		mutex_exit(&(DEVI(dip)->devi_lock));
3345 		return (DDI_PROP_SUCCESS);
3346 	}
3347 
3348 	mutex_exit(&(DEVI(dip)->devi_lock));
3349 	if (length != 0)
3350 		kmem_free(p, length);
3351 
3352 	return (ddi_prop_add(dev, dip, flags, name, value, length));
3353 }
3354 
3355 /*
3356  * Common update routine used to update and encode a property.	Creates
3357  * a property handle, calls the property encode routine, figures out if
3358  * the property already exists and updates if it does.	Otherwise it
3359  * creates if it does not exist.
3360  */
3361 int
ddi_prop_update_common(dev_t match_dev,dev_info_t * dip,int flags,char * name,void * data,uint_t nelements,int (* prop_create)(prop_handle_t *,void * data,uint_t nelements))3362 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3363     char *name, void *data, uint_t nelements,
3364     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3365 {
3366 	prop_handle_t	ph;
3367 	int		rval;
3368 	uint_t		ourflags;
3369 
3370 	/*
3371 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3372 	 * return error.
3373 	 */
3374 	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3375 		return (DDI_PROP_INVAL_ARG);
3376 
3377 	/*
3378 	 * Create the handle
3379 	 */
3380 	ph.ph_data = NULL;
3381 	ph.ph_cur_pos = NULL;
3382 	ph.ph_save_pos = NULL;
3383 	ph.ph_size = 0;
3384 	ph.ph_ops = &prop_1275_ops;
3385 
3386 	/*
3387 	 * ourflags:
3388 	 * For compatibility with the old interfaces.  The old interfaces
3389 	 * didn't sleep by default and slept when the flag was set.  These
3390 	 * interfaces to the opposite.	So the old interfaces now set the
3391 	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3392 	 *
3393 	 * ph.ph_flags:
3394 	 * Blocked data or unblocked data allocation
3395 	 * for ph.ph_data in ddi_prop_encode_alloc()
3396 	 */
3397 	if (flags & DDI_PROP_DONTSLEEP) {
3398 		ourflags = flags;
3399 		ph.ph_flags = DDI_PROP_DONTSLEEP;
3400 	} else {
3401 		ourflags = flags | DDI_PROP_CANSLEEP;
3402 		ph.ph_flags = DDI_PROP_CANSLEEP;
3403 	}
3404 
3405 	/*
3406 	 * Encode the data and store it in the property handle by
3407 	 * calling the prop_encode routine.
3408 	 */
3409 	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3410 	    DDI_PROP_SUCCESS) {
3411 		if (rval == DDI_PROP_NO_MEMORY)
3412 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3413 		if (ph.ph_size != 0)
3414 			kmem_free(ph.ph_data, ph.ph_size);
3415 		return (rval);
3416 	}
3417 
3418 	/*
3419 	 * The old interfaces use a stacking approach to creating
3420 	 * properties.	If we are being called from the old interfaces,
3421 	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3422 	 * create without checking.
3423 	 */
3424 	if (flags & DDI_PROP_STACK_CREATE) {
3425 		rval = ddi_prop_add(match_dev, dip,
3426 		    ourflags, name, ph.ph_data, ph.ph_size);
3427 	} else {
3428 		rval = ddi_prop_change(match_dev, dip,
3429 		    ourflags, name, ph.ph_data, ph.ph_size);
3430 	}
3431 
3432 	/*
3433 	 * Free the encoded data allocated in the prop_encode routine.
3434 	 */
3435 	if (ph.ph_size != 0)
3436 		kmem_free(ph.ph_data, ph.ph_size);
3437 
3438 	return (rval);
3439 }
3440 
3441 
3442 /*
3443  * ddi_prop_create:	Define a managed property:
3444  *			See above for details.
3445  */
3446 
3447 int
ddi_prop_create(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3448 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3449     char *name, caddr_t value, int length)
3450 {
3451 	if (!(flag & DDI_PROP_CANSLEEP)) {
3452 		flag |= DDI_PROP_DONTSLEEP;
3453 #ifdef DDI_PROP_DEBUG
3454 		if (length != 0)
3455 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3456 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3457 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3458 #endif /* DDI_PROP_DEBUG */
3459 	}
3460 	flag &= ~DDI_PROP_SYSTEM_DEF;
3461 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3462 	return (ddi_prop_update_common(dev, dip, flag, name,
3463 	    value, length, ddi_prop_fm_encode_bytes));
3464 }
3465 
3466 int
e_ddi_prop_create(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3467 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3468     char *name, caddr_t value, int length)
3469 {
3470 	if (!(flag & DDI_PROP_CANSLEEP))
3471 		flag |= DDI_PROP_DONTSLEEP;
3472 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3473 	return (ddi_prop_update_common(dev, dip, flag,
3474 	    name, value, length, ddi_prop_fm_encode_bytes));
3475 }
3476 
3477 int
ddi_prop_modify(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3478 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3479     char *name, caddr_t value, int length)
3480 {
3481 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3482 
3483 	/*
3484 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3485 	 * return error.
3486 	 */
3487 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3488 		return (DDI_PROP_INVAL_ARG);
3489 
3490 	if (!(flag & DDI_PROP_CANSLEEP))
3491 		flag |= DDI_PROP_DONTSLEEP;
3492 	flag &= ~DDI_PROP_SYSTEM_DEF;
3493 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3494 		return (DDI_PROP_NOT_FOUND);
3495 
3496 	return (ddi_prop_update_common(dev, dip,
3497 	    (flag | DDI_PROP_TYPE_BYTE), name,
3498 	    value, length, ddi_prop_fm_encode_bytes));
3499 }
3500 
3501 int
e_ddi_prop_modify(dev_t dev,dev_info_t * dip,int flag,char * name,caddr_t value,int length)3502 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3503     char *name, caddr_t value, int length)
3504 {
3505 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3506 
3507 	/*
3508 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3509 	 * return error.
3510 	 */
3511 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3512 		return (DDI_PROP_INVAL_ARG);
3513 
3514 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3515 		return (DDI_PROP_NOT_FOUND);
3516 
3517 	if (!(flag & DDI_PROP_CANSLEEP))
3518 		flag |= DDI_PROP_DONTSLEEP;
3519 	return (ddi_prop_update_common(dev, dip,
3520 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3521 	    name, value, length, ddi_prop_fm_encode_bytes));
3522 }
3523 
3524 
3525 /*
3526  * Common lookup routine used to lookup and decode a property.
3527  * Creates a property handle, searches for the raw encoded data,
3528  * fills in the handle, and calls the property decode functions
3529  * passed in.
3530  *
3531  * This routine is not static because ddi_bus_prop_op() which lives in
3532  * ddi_impl.c calls it.  No driver should be calling this routine.
3533  */
3534 int
ddi_prop_lookup_common(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,void * data,uint_t * nelements,int (* prop_decoder)(prop_handle_t *,void * data,uint_t * nelements))3535 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3536     uint_t flags, char *name, void *data, uint_t *nelements,
3537     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3538 {
3539 	int		rval;
3540 	uint_t		ourflags;
3541 	prop_handle_t	ph;
3542 
3543 	if ((match_dev == DDI_DEV_T_NONE) ||
3544 	    (name == NULL) || (strlen(name) == 0))
3545 		return (DDI_PROP_INVAL_ARG);
3546 
3547 	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3548 	    flags | DDI_PROP_CANSLEEP;
3549 
3550 	/*
3551 	 * Get the encoded data
3552 	 */
3553 	bzero(&ph, sizeof (prop_handle_t));
3554 
3555 	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
3556 		/*
3557 		 * For rootnex and unbound dlpi style-2 devices, index into
3558 		 * the devnames' array and search the global
3559 		 * property list.
3560 		 */
3561 		ourflags &= ~DDI_UNBND_DLPI2;
3562 		rval = i_ddi_prop_search_global(match_dev,
3563 		    ourflags, name, &ph.ph_data, &ph.ph_size);
3564 	} else {
3565 		rval = ddi_prop_search_common(match_dev, dip,
3566 		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3567 		    &ph.ph_data, &ph.ph_size);
3568 
3569 	}
3570 
3571 	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3572 		ASSERT(ph.ph_data == NULL);
3573 		ASSERT(ph.ph_size == 0);
3574 		return (rval);
3575 	}
3576 
3577 	/*
3578 	 * If the encoded data came from a OBP or software
3579 	 * use the 1275 OBP decode/encode routines.
3580 	 */
3581 	ph.ph_cur_pos = ph.ph_data;
3582 	ph.ph_save_pos = ph.ph_data;
3583 	ph.ph_ops = &prop_1275_ops;
3584 	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3585 
3586 	rval = (*prop_decoder)(&ph, data, nelements);
3587 
3588 	/*
3589 	 * Free the encoded data
3590 	 */
3591 	if (ph.ph_size != 0)
3592 		kmem_free(ph.ph_data, ph.ph_size);
3593 
3594 	return (rval);
3595 }
3596 
3597 /*
3598  * Lookup and return an array of composite properties.  The driver must
3599  * provide the decode routine.
3600  */
3601 int
ddi_prop_lookup(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,void * data,uint_t * nelements,int (* prop_decoder)(prop_handle_t *,void * data,uint_t * nelements))3602 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3603     uint_t flags, char *name, void *data, uint_t *nelements,
3604     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3605 {
3606 	return (ddi_prop_lookup_common(match_dev, dip,
3607 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3608 	    data, nelements, prop_decoder));
3609 }
3610 
3611 /*
3612  * Return 1 if a property exists (no type checking done).
3613  * Return 0 if it does not exist.
3614  */
3615 int
ddi_prop_exists(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name)3616 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3617 {
3618 	int	i;
3619 	uint_t	x = 0;
3620 
3621 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3622 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3623 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3624 }
3625 
3626 
3627 /*
3628  * Update an array of composite properties.  The driver must
3629  * provide the encode routine.
3630  */
3631 int
ddi_prop_update(dev_t match_dev,dev_info_t * dip,char * name,void * data,uint_t nelements,int (* prop_create)(prop_handle_t *,void * data,uint_t nelements))3632 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3633     char *name, void *data, uint_t nelements,
3634     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3635 {
3636 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3637 	    name, data, nelements, prop_create));
3638 }
3639 
3640 /*
3641  * Get a single integer or boolean property and return it.
3642  * If the property does not exists, or cannot be decoded,
3643  * then return the defvalue passed in.
3644  *
3645  * This routine always succeeds.
3646  */
3647 int
ddi_prop_get_int(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,int defvalue)3648 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
3649     char *name, int defvalue)
3650 {
3651 	int	data;
3652 	uint_t	nelements;
3653 	int	rval;
3654 
3655 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3656 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3657 #ifdef DEBUG
3658 		if (dip != NULL) {
3659 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
3660 			    " 0x%x (prop = %s, node = %s%d)", flags,
3661 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3662 		}
3663 #endif /* DEBUG */
3664 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3665 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3666 	}
3667 
3668 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3669 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
3670 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
3671 		if (rval == DDI_PROP_END_OF_DATA)
3672 			data = 1;
3673 		else
3674 			data = defvalue;
3675 	}
3676 	return (data);
3677 }
3678 
3679 /*
3680  * Get a single 64 bit integer or boolean property and return it.
3681  * If the property does not exists, or cannot be decoded,
3682  * then return the defvalue passed in.
3683  *
3684  * This routine always succeeds.
3685  */
3686 int64_t
ddi_prop_get_int64(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,int64_t defvalue)3687 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
3688     char *name, int64_t defvalue)
3689 {
3690 	int64_t	data;
3691 	uint_t	nelements;
3692 	int	rval;
3693 
3694 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3695 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3696 #ifdef DEBUG
3697 		if (dip != NULL) {
3698 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
3699 			    " 0x%x (prop = %s, node = %s%d)", flags,
3700 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3701 		}
3702 #endif /* DEBUG */
3703 		return (DDI_PROP_INVAL_ARG);
3704 	}
3705 
3706 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3707 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3708 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
3709 	    != DDI_PROP_SUCCESS) {
3710 		if (rval == DDI_PROP_END_OF_DATA)
3711 			data = 1;
3712 		else
3713 			data = defvalue;
3714 	}
3715 	return (data);
3716 }
3717 
3718 /*
3719  * Get an array of integer property
3720  */
3721 int
ddi_prop_lookup_int_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,int ** data,uint_t * nelements)3722 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3723     char *name, int **data, uint_t *nelements)
3724 {
3725 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3726 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3727 #ifdef DEBUG
3728 		if (dip != NULL) {
3729 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
3730 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3731 			    flags, name, ddi_driver_name(dip),
3732 			    ddi_get_instance(dip));
3733 		}
3734 #endif /* DEBUG */
3735 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3736 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3737 	}
3738 
3739 	return (ddi_prop_lookup_common(match_dev, dip,
3740 	    (flags | DDI_PROP_TYPE_INT), name, data,
3741 	    nelements, ddi_prop_fm_decode_ints));
3742 }
3743 
3744 /*
3745  * Get an array of 64 bit integer properties
3746  */
3747 int
ddi_prop_lookup_int64_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,int64_t ** data,uint_t * nelements)3748 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3749     char *name, int64_t **data, uint_t *nelements)
3750 {
3751 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3752 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3753 #ifdef DEBUG
3754 		if (dip != NULL) {
3755 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
3756 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3757 			    flags, name, ddi_driver_name(dip),
3758 			    ddi_get_instance(dip));
3759 		}
3760 #endif /* DEBUG */
3761 		return (DDI_PROP_INVAL_ARG);
3762 	}
3763 
3764 	return (ddi_prop_lookup_common(match_dev, dip,
3765 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3766 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
3767 }
3768 
3769 /*
3770  * Update a single integer property.  If the property exists on the drivers
3771  * property list it updates, else it creates it.
3772  */
3773 int
ddi_prop_update_int(dev_t match_dev,dev_info_t * dip,char * name,int data)3774 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3775     char *name, int data)
3776 {
3777 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3778 	    name, &data, 1, ddi_prop_fm_encode_ints));
3779 }
3780 
3781 /*
3782  * Update a single 64 bit integer property.
3783  * Update the driver property list if it exists, else create it.
3784  */
3785 int
ddi_prop_update_int64(dev_t match_dev,dev_info_t * dip,char * name,int64_t data)3786 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3787     char *name, int64_t data)
3788 {
3789 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3790 	    name, &data, 1, ddi_prop_fm_encode_int64));
3791 }
3792 
3793 int
e_ddi_prop_update_int(dev_t match_dev,dev_info_t * dip,char * name,int data)3794 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3795     char *name, int data)
3796 {
3797 	return (ddi_prop_update_common(match_dev, dip,
3798 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3799 	    name, &data, 1, ddi_prop_fm_encode_ints));
3800 }
3801 
3802 int
e_ddi_prop_update_int64(dev_t match_dev,dev_info_t * dip,char * name,int64_t data)3803 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3804     char *name, int64_t data)
3805 {
3806 	return (ddi_prop_update_common(match_dev, dip,
3807 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3808 	    name, &data, 1, ddi_prop_fm_encode_int64));
3809 }
3810 
3811 /*
3812  * Update an array of integer property.  If the property exists on the drivers
3813  * property list it updates, else it creates it.
3814  */
3815 int
ddi_prop_update_int_array(dev_t match_dev,dev_info_t * dip,char * name,int * data,uint_t nelements)3816 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3817     char *name, int *data, uint_t nelements)
3818 {
3819 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3820 	    name, data, nelements, ddi_prop_fm_encode_ints));
3821 }
3822 
3823 /*
3824  * Update an array of 64 bit integer properties.
3825  * Update the driver property list if it exists, else create it.
3826  */
3827 int
ddi_prop_update_int64_array(dev_t match_dev,dev_info_t * dip,char * name,int64_t * data,uint_t nelements)3828 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3829     char *name, int64_t *data, uint_t nelements)
3830 {
3831 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3832 	    name, data, nelements, ddi_prop_fm_encode_int64));
3833 }
3834 
3835 int
e_ddi_prop_update_int64_array(dev_t match_dev,dev_info_t * dip,char * name,int64_t * data,uint_t nelements)3836 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3837     char *name, int64_t *data, uint_t nelements)
3838 {
3839 	return (ddi_prop_update_common(match_dev, dip,
3840 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3841 	    name, data, nelements, ddi_prop_fm_encode_int64));
3842 }
3843 
3844 int
e_ddi_prop_update_int_array(dev_t match_dev,dev_info_t * dip,char * name,int * data,uint_t nelements)3845 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3846     char *name, int *data, uint_t nelements)
3847 {
3848 	return (ddi_prop_update_common(match_dev, dip,
3849 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3850 	    name, data, nelements, ddi_prop_fm_encode_ints));
3851 }
3852 
3853 /*
3854  * Get a single string property.
3855  */
3856 int
ddi_prop_lookup_string(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,char ** data)3857 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3858     char *name, char **data)
3859 {
3860 	uint_t x;
3861 
3862 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3863 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3864 #ifdef DEBUG
3865 		if (dip != NULL) {
3866 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3867 			    "(prop = %s, node = %s%d); invalid bits ignored",
3868 			    "ddi_prop_lookup_string", flags, name,
3869 			    ddi_driver_name(dip), ddi_get_instance(dip));
3870 		}
3871 #endif /* DEBUG */
3872 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3873 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3874 	}
3875 
3876 	return (ddi_prop_lookup_common(match_dev, dip,
3877 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3878 	    &x, ddi_prop_fm_decode_string));
3879 }
3880 
3881 /*
3882  * Get an array of strings property.
3883  */
3884 int
ddi_prop_lookup_string_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,char *** data,uint_t * nelements)3885 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3886     char *name, char ***data, uint_t *nelements)
3887 {
3888 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3889 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3890 #ifdef DEBUG
3891 		if (dip != NULL) {
3892 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3893 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3894 			    flags, name, ddi_driver_name(dip),
3895 			    ddi_get_instance(dip));
3896 		}
3897 #endif /* DEBUG */
3898 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3899 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3900 	}
3901 
3902 	return (ddi_prop_lookup_common(match_dev, dip,
3903 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3904 	    nelements, ddi_prop_fm_decode_strings));
3905 }
3906 
3907 /*
3908  * Update a single string property.
3909  */
3910 int
ddi_prop_update_string(dev_t match_dev,dev_info_t * dip,char * name,char * data)3911 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3912     char *name, char *data)
3913 {
3914 	return (ddi_prop_update_common(match_dev, dip,
3915 	    DDI_PROP_TYPE_STRING, name, &data, 1,
3916 	    ddi_prop_fm_encode_string));
3917 }
3918 
3919 int
e_ddi_prop_update_string(dev_t match_dev,dev_info_t * dip,char * name,char * data)3920 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3921     char *name, char *data)
3922 {
3923 	return (ddi_prop_update_common(match_dev, dip,
3924 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3925 	    name, &data, 1, ddi_prop_fm_encode_string));
3926 }
3927 
3928 
3929 /*
3930  * Update an array of strings property.
3931  */
3932 int
ddi_prop_update_string_array(dev_t match_dev,dev_info_t * dip,char * name,char ** data,uint_t nelements)3933 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3934     char *name, char **data, uint_t nelements)
3935 {
3936 	return (ddi_prop_update_common(match_dev, dip,
3937 	    DDI_PROP_TYPE_STRING, name, data, nelements,
3938 	    ddi_prop_fm_encode_strings));
3939 }
3940 
3941 int
e_ddi_prop_update_string_array(dev_t match_dev,dev_info_t * dip,char * name,char ** data,uint_t nelements)3942 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3943     char *name, char **data, uint_t nelements)
3944 {
3945 	return (ddi_prop_update_common(match_dev, dip,
3946 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3947 	    name, data, nelements,
3948 	    ddi_prop_fm_encode_strings));
3949 }
3950 
3951 
3952 /*
3953  * Get an array of bytes property.
3954  */
3955 int
ddi_prop_lookup_byte_array(dev_t match_dev,dev_info_t * dip,uint_t flags,char * name,uchar_t ** data,uint_t * nelements)3956 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3957     char *name, uchar_t **data, uint_t *nelements)
3958 {
3959 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3960 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3961 #ifdef DEBUG
3962 		if (dip != NULL) {
3963 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
3964 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
3965 			    flags, name, ddi_driver_name(dip),
3966 			    ddi_get_instance(dip));
3967 		}
3968 #endif /* DEBUG */
3969 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3970 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3971 	}
3972 
3973 	return (ddi_prop_lookup_common(match_dev, dip,
3974 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
3975 	    nelements, ddi_prop_fm_decode_bytes));
3976 }
3977 
3978 /*
3979  * Update an array of bytes property.
3980  */
3981 int
ddi_prop_update_byte_array(dev_t match_dev,dev_info_t * dip,char * name,uchar_t * data,uint_t nelements)3982 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3983     char *name, uchar_t *data, uint_t nelements)
3984 {
3985 	if (nelements == 0)
3986 		return (DDI_PROP_INVAL_ARG);
3987 
3988 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
3989 	    name, data, nelements, ddi_prop_fm_encode_bytes));
3990 }
3991 
3992 
3993 int
e_ddi_prop_update_byte_array(dev_t match_dev,dev_info_t * dip,char * name,uchar_t * data,uint_t nelements)3994 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3995     char *name, uchar_t *data, uint_t nelements)
3996 {
3997 	if (nelements == 0)
3998 		return (DDI_PROP_INVAL_ARG);
3999 
4000 	return (ddi_prop_update_common(match_dev, dip,
4001 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4002 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4003 }
4004 
4005 
4006 /*
4007  * ddi_prop_remove_common:	Undefine a managed property:
4008  *			Input dev_t must match dev_t when defined.
4009  *			Returns DDI_PROP_NOT_FOUND, possibly.
4010  *			DDI_PROP_INVAL_ARG is also possible if dev is
4011  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4012  */
4013 int
ddi_prop_remove_common(dev_t dev,dev_info_t * dip,char * name,int flag)4014 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4015 {
4016 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4017 	ddi_prop_t	*propp;
4018 	ddi_prop_t	*lastpropp = NULL;
4019 
4020 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4021 	    (strlen(name) == 0)) {
4022 		return (DDI_PROP_INVAL_ARG);
4023 	}
4024 
4025 	if (flag & DDI_PROP_SYSTEM_DEF)
4026 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4027 	else if (flag & DDI_PROP_HW_DEF)
4028 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4029 
4030 	mutex_enter(&(DEVI(dip)->devi_lock));
4031 
4032 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4033 		if (DDI_STRSAME(propp->prop_name, name) &&
4034 		    (dev == propp->prop_dev)) {
4035 			/*
4036 			 * Unlink this propp allowing for it to
4037 			 * be first in the list:
4038 			 */
4039 
4040 			if (lastpropp == NULL)
4041 				*list_head = propp->prop_next;
4042 			else
4043 				lastpropp->prop_next = propp->prop_next;
4044 
4045 			mutex_exit(&(DEVI(dip)->devi_lock));
4046 
4047 			/*
4048 			 * Free memory and return...
4049 			 */
4050 			kmem_free(propp->prop_name,
4051 			    strlen(propp->prop_name) + 1);
4052 			if (propp->prop_len != 0)
4053 				kmem_free(propp->prop_val, propp->prop_len);
4054 			kmem_free(propp, sizeof (ddi_prop_t));
4055 			return (DDI_PROP_SUCCESS);
4056 		}
4057 		lastpropp = propp;
4058 	}
4059 	mutex_exit(&(DEVI(dip)->devi_lock));
4060 	return (DDI_PROP_NOT_FOUND);
4061 }
4062 
4063 int
ddi_prop_remove(dev_t dev,dev_info_t * dip,char * name)4064 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4065 {
4066 	return (ddi_prop_remove_common(dev, dip, name, 0));
4067 }
4068 
4069 int
e_ddi_prop_remove(dev_t dev,dev_info_t * dip,char * name)4070 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4071 {
4072 	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4073 }
4074 
4075 /*
4076  * e_ddi_prop_list_delete: remove a list of properties
4077  *	Note that the caller needs to provide the required protection
4078  *	(eg. devi_lock if these properties are still attached to a devi)
4079  */
4080 void
e_ddi_prop_list_delete(ddi_prop_t * props)4081 e_ddi_prop_list_delete(ddi_prop_t *props)
4082 {
4083 	i_ddi_prop_list_delete(props);
4084 }
4085 
4086 /*
4087  * ddi_prop_remove_all_common:
4088  *	Used before unloading a driver to remove
4089  *	all properties. (undefines all dev_t's props.)
4090  *	Also removes `explicitly undefined' props.
4091  *	No errors possible.
4092  */
4093 void
ddi_prop_remove_all_common(dev_info_t * dip,int flag)4094 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4095 {
4096 	ddi_prop_t	**list_head;
4097 
4098 	mutex_enter(&(DEVI(dip)->devi_lock));
4099 	if (flag & DDI_PROP_SYSTEM_DEF) {
4100 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4101 	} else if (flag & DDI_PROP_HW_DEF) {
4102 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4103 	} else {
4104 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4105 	}
4106 	i_ddi_prop_list_delete(*list_head);
4107 	*list_head = NULL;
4108 	mutex_exit(&(DEVI(dip)->devi_lock));
4109 }
4110 
4111 
4112 /*
4113  * ddi_prop_remove_all:		Remove all driver prop definitions.
4114  */
4115 
4116 void
ddi_prop_remove_all(dev_info_t * dip)4117 ddi_prop_remove_all(dev_info_t *dip)
4118 {
4119 	i_ddi_prop_dyn_driver_set(dip, NULL);
4120 	ddi_prop_remove_all_common(dip, 0);
4121 }
4122 
4123 /*
4124  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4125  */
4126 
4127 void
e_ddi_prop_remove_all(dev_info_t * dip)4128 e_ddi_prop_remove_all(dev_info_t *dip)
4129 {
4130 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4131 }
4132 
4133 
4134 /*
4135  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4136  *			searches which match this property return
4137  *			the error code DDI_PROP_UNDEFINED.
4138  *
4139  *			Use ddi_prop_remove to negate effect of
4140  *			ddi_prop_undefine
4141  *
4142  *			See above for error returns.
4143  */
4144 
4145 int
ddi_prop_undefine(dev_t dev,dev_info_t * dip,int flag,char * name)4146 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4147 {
4148 	if (!(flag & DDI_PROP_CANSLEEP))
4149 		flag |= DDI_PROP_DONTSLEEP;
4150 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4151 	return (ddi_prop_update_common(dev, dip, flag,
4152 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4153 }
4154 
4155 int
e_ddi_prop_undefine(dev_t dev,dev_info_t * dip,int flag,char * name)4156 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4157 {
4158 	if (!(flag & DDI_PROP_CANSLEEP))
4159 		flag |= DDI_PROP_DONTSLEEP;
4160 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4161 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4162 	return (ddi_prop_update_common(dev, dip, flag,
4163 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4164 }
4165 
4166 /*
4167  * Support for gathering dynamic properties in devinfo snapshot.
4168  */
4169 void
i_ddi_prop_dyn_driver_set(dev_info_t * dip,i_ddi_prop_dyn_t * dp)4170 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4171 {
4172 	DEVI(dip)->devi_prop_dyn_driver = dp;
4173 }
4174 
4175 i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t * dip)4176 i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4177 {
4178 	return (DEVI(dip)->devi_prop_dyn_driver);
4179 }
4180 
4181 void
i_ddi_prop_dyn_parent_set(dev_info_t * dip,i_ddi_prop_dyn_t * dp)4182 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4183 {
4184 	DEVI(dip)->devi_prop_dyn_parent = dp;
4185 }
4186 
4187 i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t * dip)4188 i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4189 {
4190 	return (DEVI(dip)->devi_prop_dyn_parent);
4191 }
4192 
4193 void
i_ddi_prop_dyn_cache_invalidate(dev_info_t * dip,i_ddi_prop_dyn_t * dp)4194 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4195 {
4196 	/* for now we invalidate the entire cached snapshot */
4197 	if (dip && dp)
4198 		i_ddi_di_cache_invalidate();
4199 }
4200 
4201 /* ARGSUSED */
4202 void
ddi_prop_cache_invalidate(dev_t dev,dev_info_t * dip,char * name,int flags)4203 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
4204 {
4205 	/* for now we invalidate the entire cached snapshot */
4206 	i_ddi_di_cache_invalidate();
4207 }
4208 
4209 
4210 /*
4211  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4212  *
4213  * if input dip != child_dip, then call is on behalf of child
4214  * to search PROM, do it via ddi_prop_search_common() and ascend only
4215  * if allowed.
4216  *
4217  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4218  * to search for PROM defined props only.
4219  *
4220  * Note that the PROM search is done only if the requested dev
4221  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4222  * have no associated dev, thus are automatically associated with
4223  * DDI_DEV_T_NONE.
4224  *
4225  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4226  *
4227  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4228  * that the property resides in the prom.
4229  */
4230 int
impl_ddi_bus_prop_op(dev_t dev,dev_info_t * dip,dev_info_t * ch_dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp)4231 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4232     ddi_prop_op_t prop_op, int mod_flags,
4233     char *name, caddr_t valuep, int *lengthp)
4234 {
4235 	int	len;
4236 	caddr_t buffer = NULL;
4237 
4238 	/*
4239 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4240 	 * look in caller's PROM if it's a self identifying device...
4241 	 *
4242 	 * Note that this is very similar to ddi_prop_op, but we
4243 	 * search the PROM instead of the s/w defined properties,
4244 	 * and we are called on by the parent driver to do this for
4245 	 * the child.
4246 	 */
4247 
4248 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4249 	    ndi_dev_is_prom_node(ch_dip) &&
4250 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4251 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4252 		if (len == -1) {
4253 			return (DDI_PROP_NOT_FOUND);
4254 		}
4255 
4256 		/*
4257 		 * If exists only request, we're done
4258 		 */
4259 		if (prop_op == PROP_EXISTS) {
4260 			return (DDI_PROP_FOUND_1275);
4261 		}
4262 
4263 		/*
4264 		 * If length only request or prop length == 0, get out
4265 		 */
4266 		if ((prop_op == PROP_LEN) || (len == 0)) {
4267 			*lengthp = len;
4268 			return (DDI_PROP_FOUND_1275);
4269 		}
4270 
4271 		/*
4272 		 * Allocate buffer if required... (either way `buffer'
4273 		 * is receiving address).
4274 		 */
4275 
4276 		switch (prop_op) {
4277 
4278 		case PROP_LEN_AND_VAL_ALLOC:
4279 
4280 			buffer = kmem_alloc((size_t)len,
4281 			    mod_flags & DDI_PROP_CANSLEEP ?
4282 			    KM_SLEEP : KM_NOSLEEP);
4283 			if (buffer == NULL) {
4284 				return (DDI_PROP_NO_MEMORY);
4285 			}
4286 			*(caddr_t *)valuep = buffer;
4287 			break;
4288 
4289 		case PROP_LEN_AND_VAL_BUF:
4290 
4291 			if (len > (*lengthp)) {
4292 				*lengthp = len;
4293 				return (DDI_PROP_BUF_TOO_SMALL);
4294 			}
4295 
4296 			buffer = valuep;
4297 			break;
4298 
4299 		default:
4300 			break;
4301 		}
4302 
4303 		/*
4304 		 * Call the PROM function to do the copy.
4305 		 */
4306 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4307 		    name, buffer);
4308 
4309 		*lengthp = len; /* return the actual length to the caller */
4310 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4311 		return (DDI_PROP_FOUND_1275);
4312 	}
4313 
4314 	return (DDI_PROP_NOT_FOUND);
4315 }
4316 
4317 /*
4318  * The ddi_bus_prop_op default bus nexus prop op function.
4319  *
4320  * Code to search hardware layer (PROM), if it exists,
4321  * on behalf of child, then, if appropriate, ascend and check
4322  * my own software defined properties...
4323  */
4324 int
ddi_bus_prop_op(dev_t dev,dev_info_t * dip,dev_info_t * ch_dip,ddi_prop_op_t prop_op,int mod_flags,char * name,caddr_t valuep,int * lengthp)4325 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4326     ddi_prop_op_t prop_op, int mod_flags,
4327     char *name, caddr_t valuep, int *lengthp)
4328 {
4329 	int	error;
4330 
4331 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4332 	    name, valuep, lengthp);
4333 
4334 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4335 	    error == DDI_PROP_BUF_TOO_SMALL)
4336 		return (error);
4337 
4338 	if (error == DDI_PROP_NO_MEMORY) {
4339 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4340 		return (DDI_PROP_NO_MEMORY);
4341 	}
4342 
4343 	/*
4344 	 * Check the 'options' node as a last resort
4345 	 */
4346 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4347 		return (DDI_PROP_NOT_FOUND);
4348 
4349 	if (ch_dip == ddi_root_node())	{
4350 		/*
4351 		 * As a last resort, when we've reached
4352 		 * the top and still haven't found the
4353 		 * property, see if the desired property
4354 		 * is attached to the options node.
4355 		 *
4356 		 * The options dip is attached right after boot.
4357 		 */
4358 		ASSERT(options_dip != NULL);
4359 		/*
4360 		 * Force the "don't pass" flag to *just* see
4361 		 * what the options node has to offer.
4362 		 */
4363 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4364 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4365 		    (uint_t *)lengthp));
4366 	}
4367 
4368 	/*
4369 	 * Otherwise, continue search with parent's s/w defined properties...
4370 	 * NOTE: Using `dip' in following call increments the level.
4371 	 */
4372 
4373 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4374 	    name, valuep, (uint_t *)lengthp));
4375 }
4376 
4377 /*
4378  * External property functions used by other parts of the kernel...
4379  */
4380 
4381 /*
4382  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4383  */
4384 
4385 int
e_ddi_getlongprop(dev_t dev,vtype_t type,char * name,int flags,caddr_t valuep,int * lengthp)4386 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4387     caddr_t valuep, int *lengthp)
4388 {
4389 	_NOTE(ARGUNUSED(type))
4390 	dev_info_t *devi;
4391 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4392 	int error;
4393 
4394 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4395 		return (DDI_PROP_NOT_FOUND);
4396 
4397 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4398 	ddi_release_devi(devi);
4399 	return (error);
4400 }
4401 
4402 /*
4403  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4404  */
4405 
4406 int
e_ddi_getlongprop_buf(dev_t dev,vtype_t type,char * name,int flags,caddr_t valuep,int * lengthp)4407 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4408     caddr_t valuep, int *lengthp)
4409 {
4410 	_NOTE(ARGUNUSED(type))
4411 	dev_info_t *devi;
4412 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4413 	int error;
4414 
4415 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4416 		return (DDI_PROP_NOT_FOUND);
4417 
4418 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4419 	ddi_release_devi(devi);
4420 	return (error);
4421 }
4422 
4423 /*
4424  * e_ddi_getprop:	See comments for ddi_getprop.
4425  */
4426 int
e_ddi_getprop(dev_t dev,vtype_t type,char * name,int flags,int defvalue)4427 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4428 {
4429 	_NOTE(ARGUNUSED(type))
4430 	dev_info_t *devi;
4431 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4432 	int	propvalue = defvalue;
4433 	int	proplength = sizeof (int);
4434 	int	error;
4435 
4436 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4437 		return (defvalue);
4438 
4439 	error = cdev_prop_op(dev, devi, prop_op,
4440 	    flags, name, (caddr_t)&propvalue, &proplength);
4441 	ddi_release_devi(devi);
4442 
4443 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4444 		propvalue = 1;
4445 
4446 	return (propvalue);
4447 }
4448 
4449 /*
4450  * e_ddi_getprop_int64:
4451  *
4452  * This is a typed interfaces, but predates typed properties. With the
4453  * introduction of typed properties the framework tries to ensure
4454  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4455  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4456  * typed interface invokes legacy (non-typed) interfaces:
4457  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4458  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4459  * this type of lookup as a single operation we invoke the legacy
4460  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4461  * framework ddi_prop_op(9F) implementation is expected to check for
4462  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4463  * (currently TYPE_INT64).
4464  */
4465 int64_t
e_ddi_getprop_int64(dev_t dev,vtype_t type,char * name,int flags,int64_t defvalue)4466 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4467     int flags, int64_t defvalue)
4468 {
4469 	_NOTE(ARGUNUSED(type))
4470 	dev_info_t	*devi;
4471 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4472 	int64_t		propvalue = defvalue;
4473 	int		proplength = sizeof (propvalue);
4474 	int		error;
4475 
4476 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4477 		return (defvalue);
4478 
4479 	error = cdev_prop_op(dev, devi, prop_op, flags |
4480 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4481 	ddi_release_devi(devi);
4482 
4483 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4484 		propvalue = 1;
4485 
4486 	return (propvalue);
4487 }
4488 
4489 /*
4490  * e_ddi_getproplen:	See comments for ddi_getproplen.
4491  */
4492 int
e_ddi_getproplen(dev_t dev,vtype_t type,char * name,int flags,int * lengthp)4493 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4494 {
4495 	_NOTE(ARGUNUSED(type))
4496 	dev_info_t *devi;
4497 	ddi_prop_op_t prop_op = PROP_LEN;
4498 	int error;
4499 
4500 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4501 		return (DDI_PROP_NOT_FOUND);
4502 
4503 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4504 	ddi_release_devi(devi);
4505 	return (error);
4506 }
4507 
4508 /*
4509  * Routines to get at elements of the dev_info structure
4510  */
4511 
4512 /*
4513  * ddi_binding_name: Return the driver binding name of the devinfo node
4514  *		This is the name the OS used to bind the node to a driver.
4515  */
4516 char *
ddi_binding_name(dev_info_t * dip)4517 ddi_binding_name(dev_info_t *dip)
4518 {
4519 	return (DEVI(dip)->devi_binding_name);
4520 }
4521 
4522 /*
4523  * ddi_driver_major: Return the major number of the driver that
4524  *	the supplied devinfo is bound to.  If not yet bound,
4525  *	DDI_MAJOR_T_NONE.
4526  *
4527  * When used by the driver bound to 'devi', this
4528  * function will reliably return the driver major number.
4529  * Other ways of determining the driver major number, such as
4530  *	major = ddi_name_to_major(ddi_get_name(devi));
4531  *	major = ddi_name_to_major(ddi_binding_name(devi));
4532  * can return a different result as the driver/alias binding
4533  * can change dynamically, and thus should be avoided.
4534  */
4535 major_t
ddi_driver_major(dev_info_t * devi)4536 ddi_driver_major(dev_info_t *devi)
4537 {
4538 	return (DEVI(devi)->devi_major);
4539 }
4540 
4541 /*
4542  * ddi_driver_name: Return the normalized driver name. this is the
4543  *		actual driver name
4544  */
4545 const char *
ddi_driver_name(dev_info_t * devi)4546 ddi_driver_name(dev_info_t *devi)
4547 {
4548 	major_t major;
4549 
4550 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4551 		return (ddi_major_to_name(major));
4552 
4553 	return (ddi_node_name(devi));
4554 }
4555 
4556 /*
4557  * i_ddi_set_binding_name:	Set binding name.
4558  *
4559  *	Set the binding name to the given name.
4560  *	This routine is for use by the ddi implementation, not by drivers.
4561  */
4562 void
i_ddi_set_binding_name(dev_info_t * dip,char * name)4563 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4564 {
4565 	DEVI(dip)->devi_binding_name = name;
4566 
4567 }
4568 
4569 /*
4570  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4571  * the implementation has used to bind the node to a driver.
4572  */
4573 char *
ddi_get_name(dev_info_t * dip)4574 ddi_get_name(dev_info_t *dip)
4575 {
4576 	return (DEVI(dip)->devi_binding_name);
4577 }
4578 
4579 /*
4580  * ddi_node_name: Return the name property of the devinfo node
4581  *		This may differ from ddi_binding_name if the node name
4582  *		does not define a binding to a driver (i.e. generic names).
4583  */
4584 char *
ddi_node_name(dev_info_t * dip)4585 ddi_node_name(dev_info_t *dip)
4586 {
4587 	return (DEVI(dip)->devi_node_name);
4588 }
4589 
4590 
4591 /*
4592  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4593  */
4594 int
ddi_get_nodeid(dev_info_t * dip)4595 ddi_get_nodeid(dev_info_t *dip)
4596 {
4597 	return (DEVI(dip)->devi_nodeid);
4598 }
4599 
4600 int
ddi_get_instance(dev_info_t * dip)4601 ddi_get_instance(dev_info_t *dip)
4602 {
4603 	return (DEVI(dip)->devi_instance);
4604 }
4605 
4606 struct dev_ops *
ddi_get_driver(dev_info_t * dip)4607 ddi_get_driver(dev_info_t *dip)
4608 {
4609 	return (DEVI(dip)->devi_ops);
4610 }
4611 
4612 void
ddi_set_driver(dev_info_t * dip,struct dev_ops * devo)4613 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4614 {
4615 	DEVI(dip)->devi_ops = devo;
4616 }
4617 
4618 /*
4619  * ddi_set_driver_private/ddi_get_driver_private:
4620  * Get/set device driver private data in devinfo.
4621  */
4622 void
ddi_set_driver_private(dev_info_t * dip,void * data)4623 ddi_set_driver_private(dev_info_t *dip, void *data)
4624 {
4625 	DEVI(dip)->devi_driver_data = data;
4626 }
4627 
4628 void *
ddi_get_driver_private(dev_info_t * dip)4629 ddi_get_driver_private(dev_info_t *dip)
4630 {
4631 	return (DEVI(dip)->devi_driver_data);
4632 }
4633 
4634 /*
4635  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4636  */
4637 
4638 dev_info_t *
ddi_get_parent(dev_info_t * dip)4639 ddi_get_parent(dev_info_t *dip)
4640 {
4641 	return ((dev_info_t *)DEVI(dip)->devi_parent);
4642 }
4643 
4644 dev_info_t *
ddi_get_child(dev_info_t * dip)4645 ddi_get_child(dev_info_t *dip)
4646 {
4647 	return ((dev_info_t *)DEVI(dip)->devi_child);
4648 }
4649 
4650 dev_info_t *
ddi_get_next_sibling(dev_info_t * dip)4651 ddi_get_next_sibling(dev_info_t *dip)
4652 {
4653 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
4654 }
4655 
4656 dev_info_t *
ddi_get_next(dev_info_t * dip)4657 ddi_get_next(dev_info_t *dip)
4658 {
4659 	return ((dev_info_t *)DEVI(dip)->devi_next);
4660 }
4661 
4662 void
ddi_set_next(dev_info_t * dip,dev_info_t * nextdip)4663 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4664 {
4665 	DEVI(dip)->devi_next = DEVI(nextdip);
4666 }
4667 
4668 /*
4669  * ddi_root_node:		Return root node of devinfo tree
4670  */
4671 
4672 dev_info_t *
ddi_root_node(void)4673 ddi_root_node(void)
4674 {
4675 	extern dev_info_t *top_devinfo;
4676 
4677 	return (top_devinfo);
4678 }
4679 
4680 /*
4681  * Miscellaneous functions:
4682  */
4683 
4684 /*
4685  * Implementation specific hooks
4686  */
4687 
4688 void
ddi_report_dev(dev_info_t * d)4689 ddi_report_dev(dev_info_t *d)
4690 {
4691 	char *b;
4692 
4693 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4694 
4695 	/*
4696 	 * If this devinfo node has cb_ops, it's implicitly accessible from
4697 	 * userland, so we print its full name together with the instance
4698 	 * number 'abbreviation' that the driver may use internally.
4699 	 */
4700 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
4701 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4702 		cmn_err(CE_CONT, "?%s%d is %s\n",
4703 		    ddi_driver_name(d), ddi_get_instance(d),
4704 		    ddi_pathname(d, b));
4705 		kmem_free(b, MAXPATHLEN);
4706 	}
4707 }
4708 
4709 /*
4710  * ddi_ctlops() is described in the assembler not to buy a new register
4711  * window when it's called and can reduce cost in climbing the device tree
4712  * without using the tail call optimization.
4713  */
4714 int
ddi_dev_regsize(dev_info_t * dev,uint_t rnumber,off_t * result)4715 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4716 {
4717 	int ret;
4718 
4719 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4720 	    (void *)&rnumber, (void *)result);
4721 
4722 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4723 }
4724 
4725 int
ddi_dev_nregs(dev_info_t * dev,int * result)4726 ddi_dev_nregs(dev_info_t *dev, int *result)
4727 {
4728 	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
4729 }
4730 
4731 int
ddi_dev_is_sid(dev_info_t * d)4732 ddi_dev_is_sid(dev_info_t *d)
4733 {
4734 	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
4735 }
4736 
4737 int
ddi_slaveonly(dev_info_t * d)4738 ddi_slaveonly(dev_info_t *d)
4739 {
4740 	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
4741 }
4742 
4743 int
ddi_dev_affinity(dev_info_t * a,dev_info_t * b)4744 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
4745 {
4746 	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
4747 }
4748 
4749 int
ddi_streams_driver(dev_info_t * dip)4750 ddi_streams_driver(dev_info_t *dip)
4751 {
4752 	if (i_ddi_devi_attached(dip) &&
4753 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4754 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4755 		return (DDI_SUCCESS);
4756 	return (DDI_FAILURE);
4757 }
4758 
4759 /*
4760  * callback free list
4761  */
4762 
4763 static int ncallbacks;
4764 static int nc_low = 170;
4765 static int nc_med = 512;
4766 static int nc_high = 2048;
4767 static struct ddi_callback *callbackq;
4768 static struct ddi_callback *callbackqfree;
4769 
4770 /*
4771  * set/run callback lists
4772  */
4773 struct	cbstats	{
4774 	kstat_named_t	cb_asked;
4775 	kstat_named_t	cb_new;
4776 	kstat_named_t	cb_run;
4777 	kstat_named_t	cb_delete;
4778 	kstat_named_t	cb_maxreq;
4779 	kstat_named_t	cb_maxlist;
4780 	kstat_named_t	cb_alloc;
4781 	kstat_named_t	cb_runouts;
4782 	kstat_named_t	cb_L2;
4783 	kstat_named_t	cb_grow;
4784 } cbstats = {
4785 	{"asked",	KSTAT_DATA_UINT32},
4786 	{"new",		KSTAT_DATA_UINT32},
4787 	{"run",		KSTAT_DATA_UINT32},
4788 	{"delete",	KSTAT_DATA_UINT32},
4789 	{"maxreq",	KSTAT_DATA_UINT32},
4790 	{"maxlist",	KSTAT_DATA_UINT32},
4791 	{"alloc",	KSTAT_DATA_UINT32},
4792 	{"runouts",	KSTAT_DATA_UINT32},
4793 	{"L2",		KSTAT_DATA_UINT32},
4794 	{"grow",	KSTAT_DATA_UINT32},
4795 };
4796 
4797 #define	nc_asked	cb_asked.value.ui32
4798 #define	nc_new		cb_new.value.ui32
4799 #define	nc_run		cb_run.value.ui32
4800 #define	nc_delete	cb_delete.value.ui32
4801 #define	nc_maxreq	cb_maxreq.value.ui32
4802 #define	nc_maxlist	cb_maxlist.value.ui32
4803 #define	nc_alloc	cb_alloc.value.ui32
4804 #define	nc_runouts	cb_runouts.value.ui32
4805 #define	nc_L2		cb_L2.value.ui32
4806 #define	nc_grow		cb_grow.value.ui32
4807 
4808 static kmutex_t ddi_callback_mutex;
4809 
4810 /*
4811  * callbacks are handled using a L1/L2 cache. The L1 cache
4812  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4813  * we can't get callbacks from the L1 cache [because pageout is doing
4814  * I/O at the time freemem is 0], we allocate callbacks out of the
4815  * L2 cache. The L2 cache is static and depends on the memory size.
4816  * [We might also count the number of devices at probe time and
4817  * allocate one structure per device and adjust for deferred attach]
4818  */
4819 void
impl_ddi_callback_init(void)4820 impl_ddi_callback_init(void)
4821 {
4822 	int	i;
4823 	uint_t	physmegs;
4824 	kstat_t	*ksp;
4825 
4826 	physmegs = physmem >> (20 - PAGESHIFT);
4827 	if (physmegs < 48) {
4828 		ncallbacks = nc_low;
4829 	} else if (physmegs < 128) {
4830 		ncallbacks = nc_med;
4831 	} else {
4832 		ncallbacks = nc_high;
4833 	}
4834 
4835 	/*
4836 	 * init free list
4837 	 */
4838 	callbackq = kmem_zalloc(
4839 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4840 	for (i = 0; i < ncallbacks-1; i++)
4841 		callbackq[i].c_nfree = &callbackq[i+1];
4842 	callbackqfree = callbackq;
4843 
4844 	/* init kstats */
4845 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4846 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4847 		ksp->ks_data = (void *) &cbstats;
4848 		kstat_install(ksp);
4849 	}
4850 
4851 }
4852 
4853 static void
callback_insert(int (* funcp)(caddr_t),caddr_t arg,uintptr_t * listid,int count)4854 callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
4855     int count)
4856 {
4857 	struct ddi_callback *list, *marker, *new;
4858 	size_t size = sizeof (struct ddi_callback);
4859 
4860 	list = marker = (struct ddi_callback *)*listid;
4861 	while (list != NULL) {
4862 		if (list->c_call == funcp && list->c_arg == arg) {
4863 			list->c_count += count;
4864 			return;
4865 		}
4866 		marker = list;
4867 		list = list->c_nlist;
4868 	}
4869 	new = kmem_alloc(size, KM_NOSLEEP);
4870 	if (new == NULL) {
4871 		new = callbackqfree;
4872 		if (new == NULL) {
4873 			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
4874 			    &size, KM_NOSLEEP | KM_PANIC);
4875 			cbstats.nc_grow++;
4876 		} else {
4877 			callbackqfree = new->c_nfree;
4878 			cbstats.nc_L2++;
4879 		}
4880 	}
4881 	if (marker != NULL) {
4882 		marker->c_nlist = new;
4883 	} else {
4884 		*listid = (uintptr_t)new;
4885 	}
4886 	new->c_size = size;
4887 	new->c_nlist = NULL;
4888 	new->c_call = funcp;
4889 	new->c_arg = arg;
4890 	new->c_count = count;
4891 	cbstats.nc_new++;
4892 	cbstats.nc_alloc++;
4893 	if (cbstats.nc_alloc > cbstats.nc_maxlist)
4894 		cbstats.nc_maxlist = cbstats.nc_alloc;
4895 }
4896 
4897 void
ddi_set_callback(int (* funcp)(caddr_t),caddr_t arg,uintptr_t * listid)4898 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
4899 {
4900 	mutex_enter(&ddi_callback_mutex);
4901 	cbstats.nc_asked++;
4902 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
4903 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
4904 	(void) callback_insert(funcp, arg, listid, 1);
4905 	mutex_exit(&ddi_callback_mutex);
4906 }
4907 
4908 static void
real_callback_run(void * Queue)4909 real_callback_run(void *Queue)
4910 {
4911 	int (*funcp)(caddr_t);
4912 	caddr_t arg;
4913 	int count, rval;
4914 	uintptr_t *listid;
4915 	struct ddi_callback *list, *marker;
4916 	int check_pending = 1;
4917 	int pending = 0;
4918 
4919 	do {
4920 		mutex_enter(&ddi_callback_mutex);
4921 		listid = Queue;
4922 		list = (struct ddi_callback *)*listid;
4923 		if (list == NULL) {
4924 			mutex_exit(&ddi_callback_mutex);
4925 			return;
4926 		}
4927 		if (check_pending) {
4928 			marker = list;
4929 			while (marker != NULL) {
4930 				pending += marker->c_count;
4931 				marker = marker->c_nlist;
4932 			}
4933 			check_pending = 0;
4934 		}
4935 		ASSERT(pending > 0);
4936 		ASSERT(list->c_count > 0);
4937 		funcp = list->c_call;
4938 		arg = list->c_arg;
4939 		count = list->c_count;
4940 		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
4941 		if (list >= &callbackq[0] &&
4942 		    list <= &callbackq[ncallbacks-1]) {
4943 			list->c_nfree = callbackqfree;
4944 			callbackqfree = list;
4945 		} else
4946 			kmem_free(list, list->c_size);
4947 
4948 		cbstats.nc_delete++;
4949 		cbstats.nc_alloc--;
4950 		mutex_exit(&ddi_callback_mutex);
4951 
4952 		do {
4953 			if ((rval = (*funcp)(arg)) == 0) {
4954 				pending -= count;
4955 				mutex_enter(&ddi_callback_mutex);
4956 				(void) callback_insert(funcp, arg, listid,
4957 				    count);
4958 				cbstats.nc_runouts++;
4959 			} else {
4960 				pending--;
4961 				mutex_enter(&ddi_callback_mutex);
4962 				cbstats.nc_run++;
4963 			}
4964 			mutex_exit(&ddi_callback_mutex);
4965 		} while (rval != 0 && (--count > 0));
4966 	} while (pending > 0);
4967 }
4968 
4969 void
ddi_run_callback(uintptr_t * listid)4970 ddi_run_callback(uintptr_t *listid)
4971 {
4972 	softcall(real_callback_run, listid);
4973 }
4974 
4975 /*
4976  * ddi_periodic_t
4977  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
4978  *     int level)
4979  *
4980  * INTERFACE LEVEL
4981  *      Solaris DDI specific (Solaris DDI)
4982  *
4983  * PARAMETERS
4984  *      func: the callback function
4985  *
4986  *            The callback function will be invoked. The function is invoked
4987  *            in kernel context if the argument level passed is the zero.
4988  *            Otherwise it's invoked in interrupt context at the specified
4989  *            level.
4990  *
4991  *       arg: the argument passed to the callback function
4992  *
4993  *  interval: interval time
4994  *
4995  *    level : callback interrupt level
4996  *
4997  *            If the value is the zero, the callback function is invoked
4998  *            in kernel context. If the value is more than the zero, but
4999  *            less than or equal to ten, the callback function is invoked in
5000  *            interrupt context at the specified interrupt level, which may
5001  *            be used for real time applications.
5002  *
5003  *            This value must be in range of 0-10, which can be a numeric
5004  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5005  *
5006  * DESCRIPTION
5007  *      ddi_periodic_add(9F) schedules the specified function to be
5008  *      periodically invoked in the interval time.
5009  *
5010  *      As well as timeout(9F), the exact time interval over which the function
5011  *      takes effect cannot be guaranteed, but the value given is a close
5012  *      approximation.
5013  *
5014  *      Drivers waiting on behalf of processes with real-time constraints must
5015  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5016  *
5017  * RETURN VALUES
5018  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5019  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5020  *
5021  * CONTEXT
5022  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5023  *      it cannot be called in interrupt context, which is different from
5024  *      timeout(9F).
5025  */
5026 ddi_periodic_t
ddi_periodic_add(void (* func)(void *),void * arg,hrtime_t interval,int level)5027 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5028 {
5029 	/*
5030 	 * Sanity check of the argument level.
5031 	 */
5032 	if (level < DDI_IPL_0 || level > DDI_IPL_10)
5033 		cmn_err(CE_PANIC,
5034 		    "ddi_periodic_add: invalid interrupt level (%d).", level);
5035 
5036 	/*
5037 	 * Sanity check of the context. ddi_periodic_add() cannot be
5038 	 * called in either interrupt context or high interrupt context.
5039 	 */
5040 	if (servicing_interrupt())
5041 		cmn_err(CE_PANIC,
5042 		    "ddi_periodic_add: called in (high) interrupt context.");
5043 
5044 	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5045 }
5046 
5047 /*
5048  * void
5049  * ddi_periodic_delete(ddi_periodic_t req)
5050  *
5051  * INTERFACE LEVEL
5052  *     Solaris DDI specific (Solaris DDI)
5053  *
5054  * PARAMETERS
5055  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5056  *     previously.
5057  *
5058  * DESCRIPTION
5059  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5060  *     previously requested.
5061  *
5062  *     ddi_periodic_delete(9F) will not return until the pending request
5063  *     is canceled or executed.
5064  *
5065  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5066  *     timeout which is either running on another CPU, or has already
5067  *     completed causes no problems. However, unlike untimeout(9F), there is
5068  *     no restrictions on the lock which might be held across the call to
5069  *     ddi_periodic_delete(9F).
5070  *
5071  *     Drivers should be structured with the understanding that the arrival of
5072  *     both an interrupt and a timeout for that interrupt can occasionally
5073  *     occur, in either order.
5074  *
5075  * CONTEXT
5076  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5077  *     it cannot be called in interrupt context, which is different from
5078  *     untimeout(9F).
5079  */
5080 void
ddi_periodic_delete(ddi_periodic_t req)5081 ddi_periodic_delete(ddi_periodic_t req)
5082 {
5083 	/*
5084 	 * Sanity check of the context. ddi_periodic_delete() cannot be
5085 	 * called in either interrupt context or high interrupt context.
5086 	 */
5087 	if (servicing_interrupt())
5088 		cmn_err(CE_PANIC,
5089 		    "ddi_periodic_delete: called in (high) interrupt context.");
5090 
5091 	i_untimeout((timeout_t)req);
5092 }
5093 
5094 dev_info_t *
nodevinfo(dev_t dev,int otyp)5095 nodevinfo(dev_t dev, int otyp)
5096 {
5097 	_NOTE(ARGUNUSED(dev, otyp))
5098 	return ((dev_info_t *)0);
5099 }
5100 
5101 /*
5102  * A driver should support its own getinfo(9E) entry point. This function
5103  * is provided as a convenience for ON drivers that don't expect their
5104  * getinfo(9E) entry point to be called. A driver that uses this must not
5105  * call ddi_create_minor_node.
5106  */
5107 int
ddi_no_info(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)5108 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5109 {
5110 	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
5111 	return (DDI_FAILURE);
5112 }
5113 
5114 /*
5115  * A driver should support its own getinfo(9E) entry point. This function
5116  * is provided as a convenience for ON drivers that where the minor number
5117  * is the instance. Drivers that do not have 1:1 mapping must implement
5118  * their own getinfo(9E) function.
5119  */
5120 int
ddi_getinfo_1to1(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)5121 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5122     void *arg, void **result)
5123 {
5124 	_NOTE(ARGUNUSED(dip))
5125 	int	instance;
5126 
5127 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5128 		return (DDI_FAILURE);
5129 
5130 	instance = getminor((dev_t)(uintptr_t)arg);
5131 	*result = (void *)(uintptr_t)instance;
5132 	return (DDI_SUCCESS);
5133 }
5134 
5135 int
ddifail(dev_info_t * devi,ddi_attach_cmd_t cmd)5136 ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
5137 {
5138 	_NOTE(ARGUNUSED(devi, cmd))
5139 	return (DDI_FAILURE);
5140 }
5141 
5142 int
ddi_no_dma_map(dev_info_t * dip,dev_info_t * rdip,struct ddi_dma_req * dmareqp,ddi_dma_handle_t * handlep)5143 ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
5144     struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
5145 {
5146 	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
5147 	return (DDI_DMA_NOMAPPING);
5148 }
5149 
5150 int
ddi_no_dma_allochdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_attr_t * attr,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)5151 ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
5152     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
5153 {
5154 	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
5155 	return (DDI_DMA_BADATTR);
5156 }
5157 
5158 int
ddi_no_dma_freehdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)5159 ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
5160     ddi_dma_handle_t handle)
5161 {
5162 	_NOTE(ARGUNUSED(dip, rdip, handle))
5163 	return (DDI_FAILURE);
5164 }
5165 
5166 int
ddi_no_dma_bindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,struct ddi_dma_req * dmareq,ddi_dma_cookie_t * cp,uint_t * ccountp)5167 ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
5168     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
5169     ddi_dma_cookie_t *cp, uint_t *ccountp)
5170 {
5171 	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
5172 	return (DDI_DMA_NOMAPPING);
5173 }
5174 
5175 int
ddi_no_dma_unbindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)5176 ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
5177     ddi_dma_handle_t handle)
5178 {
5179 	_NOTE(ARGUNUSED(dip, rdip, handle))
5180 	return (DDI_FAILURE);
5181 }
5182 
5183 int
ddi_no_dma_flush(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,off_t off,size_t len,uint_t cache_flags)5184 ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
5185     ddi_dma_handle_t handle, off_t off, size_t len,
5186     uint_t cache_flags)
5187 {
5188 	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
5189 	return (DDI_FAILURE);
5190 }
5191 
5192 int
ddi_no_dma_win(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,uint_t win,off_t * offp,size_t * lenp,ddi_dma_cookie_t * cookiep,uint_t * ccountp)5193 ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
5194     ddi_dma_handle_t handle, uint_t win, off_t *offp,
5195     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
5196 {
5197 	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
5198 	return (DDI_FAILURE);
5199 }
5200 
5201 int
ddi_no_dma_mctl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle,enum ddi_dma_ctlops request,off_t * offp,size_t * lenp,caddr_t * objp,uint_t flags)5202 ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
5203     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
5204     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
5205 {
5206 	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
5207 	return (DDI_FAILURE);
5208 }
5209 
5210 void
ddivoid(void)5211 ddivoid(void)
5212 {}
5213 
5214 int
nochpoll(dev_t dev,short events,int anyyet,short * reventsp,struct pollhead ** pollhdrp)5215 nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
5216     struct pollhead **pollhdrp)
5217 {
5218 	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
5219 	return (ENXIO);
5220 }
5221 
5222 cred_t *
ddi_get_cred(void)5223 ddi_get_cred(void)
5224 {
5225 	return (CRED());
5226 }
5227 
5228 clock_t
ddi_get_lbolt(void)5229 ddi_get_lbolt(void)
5230 {
5231 	return ((clock_t)lbolt_hybrid());
5232 }
5233 
5234 int64_t
ddi_get_lbolt64(void)5235 ddi_get_lbolt64(void)
5236 {
5237 	return (lbolt_hybrid());
5238 }
5239 
5240 time_t
ddi_get_time(void)5241 ddi_get_time(void)
5242 {
5243 	time_t	now;
5244 
5245 	if ((now = gethrestime_sec()) == 0) {
5246 		timestruc_t ts;
5247 		mutex_enter(&tod_lock);
5248 		ts = tod_get();
5249 		mutex_exit(&tod_lock);
5250 		return (ts.tv_sec);
5251 	} else {
5252 		return (now);
5253 	}
5254 }
5255 
5256 pid_t
ddi_get_pid(void)5257 ddi_get_pid(void)
5258 {
5259 	return (ttoproc(curthread)->p_pid);
5260 }
5261 
5262 kt_did_t
ddi_get_kt_did(void)5263 ddi_get_kt_did(void)
5264 {
5265 	return (curthread->t_did);
5266 }
5267 
5268 /*
5269  * This function returns B_TRUE if the caller can reasonably expect that a call
5270  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5271  * by user-level signal.  If it returns B_FALSE, then the caller should use
5272  * other means to make certain that the wait will not hang "forever."
5273  *
5274  * It does not check the signal mask, nor for reception of any particular
5275  * signal.
5276  *
5277  * Currently, a thread can receive a signal if it's not a kernel thread and it
5278  * is not in the middle of exit(2) tear-down.  Threads that are in that
5279  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5280  * cv_timedwait, and qwait_sig to qwait.
5281  */
5282 boolean_t
ddi_can_receive_sig(void)5283 ddi_can_receive_sig(void)
5284 {
5285 	proc_t *pp;
5286 
5287 	if (curthread->t_proc_flag & TP_LWPEXIT)
5288 		return (B_FALSE);
5289 	if ((pp = ttoproc(curthread)) == NULL)
5290 		return (B_FALSE);
5291 	return (pp->p_as != &kas);
5292 }
5293 
5294 /*
5295  * Swap bytes in 16-bit [half-]words
5296  */
5297 void
swab(void * src,void * dst,size_t nbytes)5298 swab(void *src, void *dst, size_t nbytes)
5299 {
5300 	uchar_t *pf = (uchar_t *)src;
5301 	uchar_t *pt = (uchar_t *)dst;
5302 	uchar_t tmp;
5303 	int nshorts;
5304 
5305 	nshorts = nbytes >> 1;
5306 
5307 	while (--nshorts >= 0) {
5308 		tmp = *pf++;
5309 		*pt++ = *pf++;
5310 		*pt++ = tmp;
5311 	}
5312 }
5313 
5314 static void
ddi_append_minor_node(dev_info_t * ddip,struct ddi_minor_data * dmdp)5315 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5316 {
5317 	struct ddi_minor_data	*dp;
5318 
5319 	ndi_devi_enter(ddip);
5320 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5321 		DEVI(ddip)->devi_minor = dmdp;
5322 	} else {
5323 		while (dp->next != (struct ddi_minor_data *)NULL)
5324 			dp = dp->next;
5325 		dp->next = dmdp;
5326 	}
5327 	ndi_devi_exit(ddip);
5328 }
5329 
5330 static int
i_log_devfs_minor_create(dev_info_t * dip,char * minor_name)5331 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5332 {
5333 	int se_flag;
5334 	int kmem_flag;
5335 	int se_err;
5336 	char *pathname, *class_name;
5337 	sysevent_t *ev = NULL;
5338 	sysevent_id_t eid;
5339 	sysevent_value_t se_val;
5340 	sysevent_attr_list_t *ev_attr_list = NULL;
5341 
5342 	/* determine interrupt context */
5343 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5344 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5345 
5346 	i_ddi_di_cache_invalidate();
5347 
5348 #ifdef DEBUG
5349 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5350 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5351 		    "interrupt level by driver %s",
5352 		    ddi_driver_name(dip));
5353 	}
5354 #endif /* DEBUG */
5355 
5356 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5357 	if (ev == NULL) {
5358 		goto fail;
5359 	}
5360 
5361 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5362 	if (pathname == NULL) {
5363 		sysevent_free(ev);
5364 		goto fail;
5365 	}
5366 
5367 	(void) ddi_pathname(dip, pathname);
5368 	ASSERT(strlen(pathname));
5369 	se_val.value_type = SE_DATA_TYPE_STRING;
5370 	se_val.value.sv_string = pathname;
5371 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5372 	    &se_val, se_flag) != 0) {
5373 		kmem_free(pathname, MAXPATHLEN);
5374 		sysevent_free(ev);
5375 		goto fail;
5376 	}
5377 	kmem_free(pathname, MAXPATHLEN);
5378 
5379 	/* add the device class attribute */
5380 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5381 		se_val.value_type = SE_DATA_TYPE_STRING;
5382 		se_val.value.sv_string = class_name;
5383 		if (sysevent_add_attr(&ev_attr_list,
5384 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5385 			sysevent_free_attr(ev_attr_list);
5386 			goto fail;
5387 		}
5388 	}
5389 
5390 	/*
5391 	 * allow for NULL minor names
5392 	 */
5393 	if (minor_name != NULL) {
5394 		se_val.value.sv_string = minor_name;
5395 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5396 		    &se_val, se_flag) != 0) {
5397 			sysevent_free_attr(ev_attr_list);
5398 			sysevent_free(ev);
5399 			goto fail;
5400 		}
5401 	}
5402 
5403 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5404 		sysevent_free_attr(ev_attr_list);
5405 		sysevent_free(ev);
5406 		goto fail;
5407 	}
5408 
5409 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5410 		if (se_err == SE_NO_TRANSPORT) {
5411 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5412 			    "for driver %s (%s). Run devfsadm -i %s",
5413 			    ddi_driver_name(dip), "syseventd not responding",
5414 			    ddi_driver_name(dip));
5415 		} else {
5416 			sysevent_free(ev);
5417 			goto fail;
5418 		}
5419 	}
5420 
5421 	sysevent_free(ev);
5422 	return (DDI_SUCCESS);
5423 fail:
5424 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5425 	    "for driver %s. Run devfsadm -i %s",
5426 	    ddi_driver_name(dip), ddi_driver_name(dip));
5427 	return (DDI_SUCCESS);
5428 }
5429 
5430 /*
5431  * failing to remove a minor node is not of interest
5432  * therefore we do not generate an error message
5433  */
5434 static int
i_log_devfs_minor_remove(dev_info_t * dip,char * minor_name)5435 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5436 {
5437 	char *pathname, *class_name;
5438 	sysevent_t *ev;
5439 	sysevent_id_t eid;
5440 	sysevent_value_t se_val;
5441 	sysevent_attr_list_t *ev_attr_list = NULL;
5442 
5443 	/*
5444 	 * only log ddi_remove_minor_node() calls outside the scope
5445 	 * of attach/detach reconfigurations and when the dip is
5446 	 * still initialized.
5447 	 */
5448 	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5449 	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5450 		return (DDI_SUCCESS);
5451 	}
5452 
5453 	i_ddi_di_cache_invalidate();
5454 
5455 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5456 	if (ev == NULL) {
5457 		return (DDI_SUCCESS);
5458 	}
5459 
5460 	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5461 	if (pathname == NULL) {
5462 		sysevent_free(ev);
5463 		return (DDI_SUCCESS);
5464 	}
5465 
5466 	(void) ddi_pathname(dip, pathname);
5467 	ASSERT(strlen(pathname));
5468 	se_val.value_type = SE_DATA_TYPE_STRING;
5469 	se_val.value.sv_string = pathname;
5470 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5471 	    &se_val, SE_SLEEP) != 0) {
5472 		kmem_free(pathname, MAXPATHLEN);
5473 		sysevent_free(ev);
5474 		return (DDI_SUCCESS);
5475 	}
5476 
5477 	kmem_free(pathname, MAXPATHLEN);
5478 
5479 	/*
5480 	 * allow for NULL minor names
5481 	 */
5482 	if (minor_name != NULL) {
5483 		se_val.value.sv_string = minor_name;
5484 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5485 		    &se_val, SE_SLEEP) != 0) {
5486 			sysevent_free_attr(ev_attr_list);
5487 			goto fail;
5488 		}
5489 	}
5490 
5491 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5492 		/* add the device class, driver name and instance attributes */
5493 
5494 		se_val.value_type = SE_DATA_TYPE_STRING;
5495 		se_val.value.sv_string = class_name;
5496 		if (sysevent_add_attr(&ev_attr_list,
5497 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5498 			sysevent_free_attr(ev_attr_list);
5499 			goto fail;
5500 		}
5501 
5502 		se_val.value_type = SE_DATA_TYPE_STRING;
5503 		se_val.value.sv_string = (char *)ddi_driver_name(dip);
5504 		if (sysevent_add_attr(&ev_attr_list,
5505 		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
5506 			sysevent_free_attr(ev_attr_list);
5507 			goto fail;
5508 		}
5509 
5510 		se_val.value_type = SE_DATA_TYPE_INT32;
5511 		se_val.value.sv_int32 = ddi_get_instance(dip);
5512 		if (sysevent_add_attr(&ev_attr_list,
5513 		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
5514 			sysevent_free_attr(ev_attr_list);
5515 			goto fail;
5516 		}
5517 
5518 	}
5519 
5520 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5521 		sysevent_free_attr(ev_attr_list);
5522 	} else {
5523 		(void) log_sysevent(ev, SE_SLEEP, &eid);
5524 	}
5525 fail:
5526 	sysevent_free(ev);
5527 	return (DDI_SUCCESS);
5528 }
5529 
5530 /*
5531  * Derive the device class of the node.
5532  * Device class names aren't defined yet. Until this is done we use
5533  * devfs event subclass names as device class names.
5534  */
5535 static int
derive_devi_class(dev_info_t * dip,const char * node_type,int flag)5536 derive_devi_class(dev_info_t *dip, const char *node_type, int flag)
5537 {
5538 	int rv = DDI_SUCCESS;
5539 
5540 	if (i_ddi_devi_class(dip) == NULL) {
5541 		if (strncmp(node_type, DDI_NT_BLOCK,
5542 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5543 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5544 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5545 		    strcmp(node_type, DDI_NT_FD) != 0) {
5546 
5547 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5548 
5549 		} else if (strncmp(node_type, DDI_NT_NET,
5550 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5551 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5552 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5553 
5554 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5555 
5556 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5557 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5558 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5559 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5560 
5561 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5562 
5563 		} else if (strncmp(node_type, DDI_PSEUDO,
5564 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5565 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5566 		    sizeof (ESC_LOFI) -1) == 0)) {
5567 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5568 		}
5569 	}
5570 
5571 	return (rv);
5572 }
5573 
5574 /*
5575  * Check compliance with PSARC 2003/375:
5576  *
5577  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5578  * exceed IFNAMSIZ (16) characters in length.
5579  */
5580 static boolean_t
verify_name(const char * name)5581 verify_name(const char *name)
5582 {
5583 	size_t len = strlen(name);
5584 	const char *cp;
5585 
5586 	if (len == 0 || len > IFNAMSIZ)
5587 		return (B_FALSE);
5588 
5589 	for (cp = name; *cp != '\0'; cp++) {
5590 		if (!isalnum(*cp) && *cp != '_')
5591 			return (B_FALSE);
5592 	}
5593 
5594 	return (B_TRUE);
5595 }
5596 
5597 /*
5598  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5599  *				attach it to the given devinfo node.
5600  */
5601 
5602 static int
ddi_create_minor_common(dev_info_t * dip,const char * name,int spec_type,minor_t minor_num,const char * node_type,int flag,ddi_minor_type mtype,const char * read_priv,const char * write_priv,mode_t priv_mode)5603 ddi_create_minor_common(dev_info_t *dip, const char *name, int spec_type,
5604     minor_t minor_num, const char *node_type, int flag, ddi_minor_type mtype,
5605     const char *read_priv, const char *write_priv, mode_t priv_mode)
5606 {
5607 	struct ddi_minor_data *dmdp;
5608 	major_t major;
5609 
5610 	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
5611 		return (DDI_FAILURE);
5612 
5613 	if (name == NULL)
5614 		return (DDI_FAILURE);
5615 
5616 	/*
5617 	 * Log a message if the minor number the driver is creating
5618 	 * is not expressible on the on-disk filesystem (currently
5619 	 * this is limited to 18 bits both by UFS). The device can
5620 	 * be opened via devfs, but not by device special files created
5621 	 * via mknod().
5622 	 */
5623 	if (minor_num > L_MAXMIN32) {
5624 		cmn_err(CE_WARN,
5625 		    "%s%d:%s minor 0x%x too big for 32-bit applications",
5626 		    ddi_driver_name(dip), ddi_get_instance(dip),
5627 		    name, minor_num);
5628 		return (DDI_FAILURE);
5629 	}
5630 
5631 	/* dip must be bound and attached */
5632 	major = ddi_driver_major(dip);
5633 	ASSERT(major != DDI_MAJOR_T_NONE);
5634 
5635 	/*
5636 	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5637 	 */
5638 	if (node_type == NULL) {
5639 		node_type = DDI_PSEUDO;
5640 		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
5641 		    " minor node %s; default to DDI_PSEUDO",
5642 		    ddi_driver_name(dip), ddi_get_instance(dip), name));
5643 	}
5644 
5645 	/*
5646 	 * If the driver is a network driver, ensure that the name falls within
5647 	 * the interface naming constraints specified by PSARC/2003/375.
5648 	 */
5649 	if (strcmp(node_type, DDI_NT_NET) == 0) {
5650 		if (!verify_name(name))
5651 			return (DDI_FAILURE);
5652 
5653 		if (mtype == DDM_MINOR) {
5654 			struct devnames *dnp = &devnamesp[major];
5655 
5656 			/* Mark driver as a network driver */
5657 			LOCK_DEV_OPS(&dnp->dn_lock);
5658 			dnp->dn_flags |= DN_NETWORK_DRIVER;
5659 
5660 			/*
5661 			 * If this minor node is created during the device
5662 			 * attachment, this is a physical network device.
5663 			 * Mark the driver as a physical network driver.
5664 			 */
5665 			if (DEVI_IS_ATTACHING(dip))
5666 				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
5667 			UNLOCK_DEV_OPS(&dnp->dn_lock);
5668 		}
5669 	}
5670 
5671 	if (mtype == DDM_MINOR) {
5672 		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
5673 		    DDI_SUCCESS)
5674 			return (DDI_FAILURE);
5675 	}
5676 
5677 	/*
5678 	 * Take care of minor number information for the node.
5679 	 */
5680 
5681 	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
5682 	    KM_NOSLEEP)) == NULL) {
5683 		return (DDI_FAILURE);
5684 	}
5685 	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
5686 		kmem_free(dmdp, sizeof (struct ddi_minor_data));
5687 		return (DDI_FAILURE);
5688 	}
5689 	dmdp->dip = dip;
5690 	dmdp->ddm_dev = makedevice(major, minor_num);
5691 	dmdp->ddm_spec_type = spec_type;
5692 	dmdp->ddm_node_type = node_type;
5693 	dmdp->type = mtype;
5694 	if (flag & CLONE_DEV) {
5695 		dmdp->type = DDM_ALIAS;
5696 		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
5697 	}
5698 	if (flag & PRIVONLY_DEV) {
5699 		dmdp->ddm_flags |= DM_NO_FSPERM;
5700 	}
5701 	if (read_priv || write_priv) {
5702 		dmdp->ddm_node_priv =
5703 		    devpolicy_priv_by_name(read_priv, write_priv);
5704 	}
5705 	dmdp->ddm_priv_mode = priv_mode;
5706 
5707 	ddi_append_minor_node(dip, dmdp);
5708 
5709 	/*
5710 	 * only log ddi_create_minor_node() calls which occur
5711 	 * outside the scope of attach(9e)/detach(9e) reconfigurations
5712 	 */
5713 	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
5714 	    mtype != DDM_INTERNAL_PATH) {
5715 		(void) i_log_devfs_minor_create(dip, dmdp->ddm_name);
5716 	}
5717 
5718 	/*
5719 	 * Check if any dacf rules match the creation of this minor node
5720 	 */
5721 	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
5722 	return (DDI_SUCCESS);
5723 }
5724 
5725 int
ddi_create_minor_node(dev_info_t * dip,const char * name,int spec_type,minor_t minor_num,const char * node_type,int flag)5726 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
5727     minor_t minor_num, const char *node_type, int flag)
5728 {
5729 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5730 	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
5731 }
5732 
5733 int
ddi_create_priv_minor_node(dev_info_t * dip,const char * name,int spec_type,minor_t minor_num,const char * node_type,int flag,const char * rdpriv,const char * wrpriv,mode_t priv_mode)5734 ddi_create_priv_minor_node(dev_info_t *dip, const char *name, int spec_type,
5735     minor_t minor_num, const char *node_type, int flag,
5736     const char *rdpriv, const char *wrpriv, mode_t priv_mode)
5737 {
5738 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5739 	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
5740 }
5741 
5742 int
ddi_create_default_minor_node(dev_info_t * dip,const char * name,int spec_type,minor_t minor_num,const char * node_type,int flag)5743 ddi_create_default_minor_node(dev_info_t *dip, const char *name, int spec_type,
5744     minor_t minor_num, const char *node_type, int flag)
5745 {
5746 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5747 	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
5748 }
5749 
5750 /*
5751  * Internal (non-ddi) routine for drivers to export names known
5752  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5753  * but not exported externally to /dev
5754  */
5755 int
ddi_create_internal_pathname(dev_info_t * dip,char * name,int spec_type,minor_t minor_num)5756 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5757     minor_t minor_num)
5758 {
5759 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5760 	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5761 }
5762 
5763 void
ddi_remove_minor_node(dev_info_t * dip,const char * name)5764 ddi_remove_minor_node(dev_info_t *dip, const char *name)
5765 {
5766 	struct ddi_minor_data	*dmdp, *dmdp1;
5767 	struct ddi_minor_data	**dmdp_prev;
5768 
5769 	ndi_devi_enter(dip);
5770 	dmdp_prev = &DEVI(dip)->devi_minor;
5771 	dmdp = DEVI(dip)->devi_minor;
5772 	while (dmdp != NULL) {
5773 		dmdp1 = dmdp->next;
5774 		if ((name == NULL || (dmdp->ddm_name != NULL &&
5775 		    strcmp(name, dmdp->ddm_name) == 0))) {
5776 			if (dmdp->ddm_name != NULL) {
5777 				if (dmdp->type != DDM_INTERNAL_PATH)
5778 					(void) i_log_devfs_minor_remove(dip,
5779 					    dmdp->ddm_name);
5780 				kmem_free(dmdp->ddm_name,
5781 				    strlen(dmdp->ddm_name) + 1);
5782 			}
5783 			/*
5784 			 * Release device privilege, if any.
5785 			 * Release dacf client data associated with this minor
5786 			 * node by storing NULL.
5787 			 */
5788 			if (dmdp->ddm_node_priv)
5789 				dpfree(dmdp->ddm_node_priv);
5790 			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
5791 			kmem_free(dmdp, sizeof (struct ddi_minor_data));
5792 			*dmdp_prev = dmdp1;
5793 			/*
5794 			 * OK, we found it, so get out now -- if we drive on,
5795 			 * we will strcmp against garbage.  See 1139209.
5796 			 */
5797 			if (name != NULL)
5798 				break;
5799 		} else {
5800 			dmdp_prev = &dmdp->next;
5801 		}
5802 		dmdp = dmdp1;
5803 	}
5804 	ndi_devi_exit(dip);
5805 }
5806 
5807 
5808 int
ddi_in_panic()5809 ddi_in_panic()
5810 {
5811 	return (panicstr != NULL);
5812 }
5813 
5814 
5815 /*
5816  * Find first bit set in a mask (returned counting from 1 up)
5817  */
5818 
5819 int
ddi_ffs(long mask)5820 ddi_ffs(long mask)
5821 {
5822 	return (ffs(mask));
5823 }
5824 
5825 /*
5826  * Find last bit set. Take mask and clear
5827  * all but the most significant bit, and
5828  * then let ffs do the rest of the work.
5829  *
5830  * Algorithm courtesy of Steve Chessin.
5831  */
5832 
5833 int
ddi_fls(long mask)5834 ddi_fls(long mask)
5835 {
5836 	while (mask) {
5837 		long nx;
5838 
5839 		if ((nx = (mask & (mask - 1))) == 0)
5840 			break;
5841 		mask = nx;
5842 	}
5843 	return (ffs(mask));
5844 }
5845 
5846 /*
5847  * The ddi_soft_state_* routines comprise generic storage management utilities
5848  * for driver soft state structures (in "the old days," this was done with
5849  * statically sized array - big systems and dynamic loading and unloading
5850  * make heap allocation more attractive).
5851  */
5852 
5853 /*
5854  * Allocate a set of pointers to 'n_items' objects of size 'size'
5855  * bytes.  Each pointer is initialized to nil.
5856  *
5857  * The 'size' and 'n_items' values are stashed in the opaque
5858  * handle returned to the caller.
5859  *
5860  * This implementation interprets 'set of pointers' to mean 'array
5861  * of pointers' but note that nothing in the interface definition
5862  * precludes an implementation that uses, for example, a linked list.
5863  * However there should be a small efficiency gain from using an array
5864  * at lookup time.
5865  *
5866  * NOTE	As an optimization, we make our growable array allocations in
5867  *	powers of two (bytes), since that's how much kmem_alloc (currently)
5868  *	gives us anyway.  It should save us some free/realloc's ..
5869  *
5870  *	As a further optimization, we make the growable array start out
5871  *	with MIN_N_ITEMS in it.
5872  */
5873 
5874 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
5875 
5876 int
ddi_soft_state_init(void ** state_p,size_t size,size_t n_items)5877 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
5878 {
5879 	i_ddi_soft_state	*ss;
5880 
5881 	if (state_p == NULL || size == 0)
5882 		return (EINVAL);
5883 
5884 	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
5885 	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
5886 	ss->size = size;
5887 
5888 	if (n_items < MIN_N_ITEMS)
5889 		ss->n_items = MIN_N_ITEMS;
5890 	else {
5891 		int bitlog;
5892 
5893 		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
5894 			bitlog--;
5895 		ss->n_items = 1 << bitlog;
5896 	}
5897 
5898 	ASSERT(ss->n_items >= n_items);
5899 
5900 	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
5901 
5902 	*state_p = ss;
5903 	return (0);
5904 }
5905 
5906 /*
5907  * Allocate a state structure of size 'size' to be associated
5908  * with item 'item'.
5909  *
5910  * In this implementation, the array is extended to
5911  * allow the requested offset, if needed.
5912  */
5913 int
ddi_soft_state_zalloc(void * state,int item)5914 ddi_soft_state_zalloc(void *state, int item)
5915 {
5916 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
5917 	void			**array;
5918 	void			*new_element;
5919 
5920 	if ((state == NULL) || (item < 0))
5921 		return (DDI_FAILURE);
5922 
5923 	mutex_enter(&ss->lock);
5924 	if (ss->size == 0) {
5925 		mutex_exit(&ss->lock);
5926 		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
5927 		    mod_containing_pc(caller()));
5928 		return (DDI_FAILURE);
5929 	}
5930 
5931 	array = ss->array;	/* NULL if ss->n_items == 0 */
5932 	ASSERT(ss->n_items != 0 && array != NULL);
5933 
5934 	/*
5935 	 * refuse to tread on an existing element
5936 	 */
5937 	if (item < ss->n_items && array[item] != NULL) {
5938 		mutex_exit(&ss->lock);
5939 		return (DDI_FAILURE);
5940 	}
5941 
5942 	/*
5943 	 * Allocate a new element to plug in
5944 	 */
5945 	new_element = kmem_zalloc(ss->size, KM_SLEEP);
5946 
5947 	/*
5948 	 * Check if the array is big enough, if not, grow it.
5949 	 */
5950 	if (item >= ss->n_items) {
5951 		void			**new_array;
5952 		size_t			new_n_items;
5953 		struct i_ddi_soft_state	*dirty;
5954 
5955 		/*
5956 		 * Allocate a new array of the right length, copy
5957 		 * all the old pointers to the new array, then
5958 		 * if it exists at all, put the old array on the
5959 		 * dirty list.
5960 		 *
5961 		 * Note that we can't kmem_free() the old array.
5962 		 *
5963 		 * Why -- well the 'get' operation is 'mutex-free', so we
5964 		 * can't easily catch a suspended thread that is just about
5965 		 * to dereference the array we just grew out of.  So we
5966 		 * cons up a header and put it on a list of 'dirty'
5967 		 * pointer arrays.  (Dirty in the sense that there may
5968 		 * be suspended threads somewhere that are in the middle
5969 		 * of referencing them).  Fortunately, we -can- garbage
5970 		 * collect it all at ddi_soft_state_fini time.
5971 		 */
5972 		new_n_items = ss->n_items;
5973 		while (new_n_items < (1 + item))
5974 			new_n_items <<= 1;	/* double array size .. */
5975 
5976 		ASSERT(new_n_items >= (1 + item));	/* sanity check! */
5977 
5978 		new_array = kmem_zalloc(new_n_items * sizeof (void *),
5979 		    KM_SLEEP);
5980 		/*
5981 		 * Copy the pointers into the new array
5982 		 */
5983 		bcopy(array, new_array, ss->n_items * sizeof (void *));
5984 
5985 		/*
5986 		 * Save the old array on the dirty list
5987 		 */
5988 		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
5989 		dirty->array = ss->array;
5990 		dirty->n_items = ss->n_items;
5991 		dirty->next = ss->next;
5992 		ss->next = dirty;
5993 
5994 		ss->array = (array = new_array);
5995 		ss->n_items = new_n_items;
5996 	}
5997 
5998 	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
5999 
6000 	array[item] = new_element;
6001 
6002 	mutex_exit(&ss->lock);
6003 	return (DDI_SUCCESS);
6004 }
6005 
6006 /*
6007  * Fetch a pointer to the allocated soft state structure.
6008  *
6009  * This is designed to be cheap.
6010  *
6011  * There's an argument that there should be more checking for
6012  * nil pointers and out of bounds on the array.. but we do a lot
6013  * of that in the alloc/free routines.
6014  *
6015  * An array has the convenience that we don't need to lock read-access
6016  * to it c.f. a linked list.  However our "expanding array" strategy
6017  * means that we should hold a readers lock on the i_ddi_soft_state
6018  * structure.
6019  *
6020  * However, from a performance viewpoint, we need to do it without
6021  * any locks at all -- this also makes it a leaf routine.  The algorithm
6022  * is 'lock-free' because we only discard the pointer arrays at
6023  * ddi_soft_state_fini() time.
6024  */
6025 void *
ddi_get_soft_state(void * state,int item)6026 ddi_get_soft_state(void *state, int item)
6027 {
6028 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6029 
6030 	ASSERT((ss != NULL) && (item >= 0));
6031 
6032 	if (item < ss->n_items && ss->array != NULL)
6033 		return (ss->array[item]);
6034 	return (NULL);
6035 }
6036 
6037 /*
6038  * Free the state structure corresponding to 'item.'   Freeing an
6039  * element that has either gone or was never allocated is not
6040  * considered an error.  Note that we free the state structure, but
6041  * we don't shrink our pointer array, or discard 'dirty' arrays,
6042  * since even a few pointers don't really waste too much memory.
6043  *
6044  * Passing an item number that is out of bounds, or a null pointer will
6045  * provoke an error message.
6046  */
6047 void
ddi_soft_state_free(void * state,int item)6048 ddi_soft_state_free(void *state, int item)
6049 {
6050 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6051 	void			**array;
6052 	void			*element;
6053 	static char		msg[] = "ddi_soft_state_free:";
6054 
6055 	if (ss == NULL) {
6056 		cmn_err(CE_WARN, "%s null handle: %s",
6057 		    msg, mod_containing_pc(caller()));
6058 		return;
6059 	}
6060 
6061 	element = NULL;
6062 
6063 	mutex_enter(&ss->lock);
6064 
6065 	if ((array = ss->array) == NULL || ss->size == 0) {
6066 		cmn_err(CE_WARN, "%s bad handle: %s",
6067 		    msg, mod_containing_pc(caller()));
6068 	} else if (item < 0 || item >= ss->n_items) {
6069 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6070 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6071 	} else if (array[item] != NULL) {
6072 		element = array[item];
6073 		array[item] = NULL;
6074 	}
6075 
6076 	mutex_exit(&ss->lock);
6077 
6078 	if (element)
6079 		kmem_free(element, ss->size);
6080 }
6081 
6082 /*
6083  * Free the entire set of pointers, and any
6084  * soft state structures contained therein.
6085  *
6086  * Note that we don't grab the ss->lock mutex, even though
6087  * we're inspecting the various fields of the data structure.
6088  *
6089  * There is an implicit assumption that this routine will
6090  * never run concurrently with any of the above on this
6091  * particular state structure i.e. by the time the driver
6092  * calls this routine, there should be no other threads
6093  * running in the driver.
6094  */
6095 void
ddi_soft_state_fini(void ** state_p)6096 ddi_soft_state_fini(void **state_p)
6097 {
6098 	i_ddi_soft_state	*ss, *dirty;
6099 	int			item;
6100 	static char		msg[] = "ddi_soft_state_fini:";
6101 
6102 	if (state_p == NULL ||
6103 	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
6104 		cmn_err(CE_WARN, "%s null handle: %s",
6105 		    msg, mod_containing_pc(caller()));
6106 		return;
6107 	}
6108 
6109 	if (ss->size == 0) {
6110 		cmn_err(CE_WARN, "%s bad handle: %s",
6111 		    msg, mod_containing_pc(caller()));
6112 		return;
6113 	}
6114 
6115 	if (ss->n_items > 0) {
6116 		for (item = 0; item < ss->n_items; item++)
6117 			ddi_soft_state_free(ss, item);
6118 		kmem_free(ss->array, ss->n_items * sizeof (void *));
6119 	}
6120 
6121 	/*
6122 	 * Now delete any dirty arrays from previous 'grow' operations
6123 	 */
6124 	for (dirty = ss->next; dirty; dirty = ss->next) {
6125 		ss->next = dirty->next;
6126 		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6127 		kmem_free(dirty, sizeof (*dirty));
6128 	}
6129 
6130 	mutex_destroy(&ss->lock);
6131 	kmem_free(ss, sizeof (*ss));
6132 
6133 	*state_p = NULL;
6134 }
6135 
6136 #define	SS_N_ITEMS_PER_HASH	16
6137 #define	SS_MIN_HASH_SZ		16
6138 #define	SS_MAX_HASH_SZ		4096
6139 
6140 int
ddi_soft_state_bystr_init(ddi_soft_state_bystr ** state_p,size_t size,int n_items)6141 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6142     int n_items)
6143 {
6144 	i_ddi_soft_state_bystr	*sss;
6145 	int			hash_sz;
6146 
6147 	ASSERT(state_p && size && n_items);
6148 	if ((state_p == NULL) || (size == 0) || (n_items == 0))
6149 		return (EINVAL);
6150 
6151 	/* current implementation is based on hash, convert n_items to hash */
6152 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6153 	if (hash_sz < SS_MIN_HASH_SZ)
6154 		hash_sz = SS_MIN_HASH_SZ;
6155 	else if (hash_sz > SS_MAX_HASH_SZ)
6156 		hash_sz = SS_MAX_HASH_SZ;
6157 
6158 	/* allocate soft_state pool */
6159 	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6160 	sss->ss_size = size;
6161 	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6162 	    hash_sz, mod_hash_null_valdtor);
6163 	*state_p = (ddi_soft_state_bystr *)sss;
6164 	return (0);
6165 }
6166 
6167 int
ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr * state,const char * str)6168 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6169 {
6170 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6171 	void			*sso;
6172 	char			*dup_str;
6173 
6174 	ASSERT(sss && str && sss->ss_mod_hash);
6175 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6176 		return (DDI_FAILURE);
6177 	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6178 	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6179 	if (mod_hash_insert(sss->ss_mod_hash,
6180 	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6181 		return (DDI_SUCCESS);
6182 
6183 	/*
6184 	 * The only error from an strhash insert is caused by a duplicate key.
6185 	 * We refuse to tread on an existing elements, so free and fail.
6186 	 */
6187 	kmem_free(dup_str, strlen(dup_str) + 1);
6188 	kmem_free(sso, sss->ss_size);
6189 	return (DDI_FAILURE);
6190 }
6191 
6192 void *
ddi_soft_state_bystr_get(ddi_soft_state_bystr * state,const char * str)6193 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6194 {
6195 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6196 	void			*sso;
6197 
6198 	ASSERT(sss && str && sss->ss_mod_hash);
6199 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6200 		return (NULL);
6201 
6202 	if (mod_hash_find(sss->ss_mod_hash,
6203 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6204 		return (sso);
6205 	return (NULL);
6206 }
6207 
6208 void
ddi_soft_state_bystr_free(ddi_soft_state_bystr * state,const char * str)6209 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6210 {
6211 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6212 	void			*sso;
6213 
6214 	ASSERT(sss && str && sss->ss_mod_hash);
6215 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6216 		return;
6217 
6218 	(void) mod_hash_remove(sss->ss_mod_hash,
6219 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6220 	kmem_free(sso, sss->ss_size);
6221 }
6222 
6223 void
ddi_soft_state_bystr_fini(ddi_soft_state_bystr ** state_p)6224 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6225 {
6226 	i_ddi_soft_state_bystr	*sss;
6227 
6228 	ASSERT(state_p);
6229 	if (state_p == NULL)
6230 		return;
6231 
6232 	sss = (i_ddi_soft_state_bystr *)(*state_p);
6233 	if (sss == NULL)
6234 		return;
6235 
6236 	ASSERT(sss->ss_mod_hash);
6237 	if (sss->ss_mod_hash) {
6238 		mod_hash_destroy_strhash(sss->ss_mod_hash);
6239 		sss->ss_mod_hash = NULL;
6240 	}
6241 
6242 	kmem_free(sss, sizeof (*sss));
6243 	*state_p = NULL;
6244 }
6245 
6246 /*
6247  * The ddi_strid_* routines provide string-to-index management utilities.
6248  */
6249 /* allocate and initialize an strid set */
6250 int
ddi_strid_init(ddi_strid ** strid_p,int n_items)6251 ddi_strid_init(ddi_strid **strid_p, int n_items)
6252 {
6253 	i_ddi_strid	*ss;
6254 	int		hash_sz;
6255 
6256 	if (strid_p == NULL)
6257 		return (DDI_FAILURE);
6258 
6259 	/* current implementation is based on hash, convert n_items to hash */
6260 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6261 	if (hash_sz < SS_MIN_HASH_SZ)
6262 		hash_sz = SS_MIN_HASH_SZ;
6263 	else if (hash_sz > SS_MAX_HASH_SZ)
6264 		hash_sz = SS_MAX_HASH_SZ;
6265 
6266 	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6267 	ss->strid_chunksz = n_items;
6268 	ss->strid_spacesz = n_items;
6269 	ss->strid_space = id_space_create("strid", 1, n_items);
6270 	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6271 	    mod_hash_null_valdtor);
6272 	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6273 	    mod_hash_null_valdtor);
6274 	*strid_p = (ddi_strid *)ss;
6275 	return (DDI_SUCCESS);
6276 }
6277 
6278 /* allocate an id mapping within the specified set for str, return id */
6279 static id_t
i_ddi_strid_alloc(ddi_strid * strid,char * str)6280 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6281 {
6282 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6283 	id_t		id;
6284 	char		*s;
6285 
6286 	ASSERT(ss && str);
6287 	if ((ss == NULL) || (str == NULL))
6288 		return (0);
6289 
6290 	/*
6291 	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6292 	 * range as compressed as possible.  This is important to minimize
6293 	 * the amount of space used when the id is used as a ddi_soft_state
6294 	 * index by the caller.
6295 	 *
6296 	 * If the id list is exhausted, increase the size of the list
6297 	 * by the chuck size specified in ddi_strid_init and reattempt
6298 	 * the allocation
6299 	 */
6300 	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6301 		id_space_extend(ss->strid_space, ss->strid_spacesz,
6302 		    ss->strid_spacesz + ss->strid_chunksz);
6303 		ss->strid_spacesz += ss->strid_chunksz;
6304 		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6305 			return (0);
6306 	}
6307 
6308 	/*
6309 	 * NOTE: since we create and destroy in unison we can save space by
6310 	 * using bystr key as the byid value.  This means destroy must occur
6311 	 * in (byid, bystr) order.
6312 	 */
6313 	s = i_ddi_strdup(str, KM_SLEEP);
6314 	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6315 	    (mod_hash_val_t)(intptr_t)id) != 0) {
6316 		ddi_strid_free(strid, id);
6317 		return (0);
6318 	}
6319 	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6320 	    (mod_hash_val_t)s) != 0) {
6321 		ddi_strid_free(strid, id);
6322 		return (0);
6323 	}
6324 
6325 	/* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6326 	return (id);
6327 }
6328 
6329 /* allocate an id mapping within the specified set for str, return id */
6330 id_t
ddi_strid_alloc(ddi_strid * strid,char * str)6331 ddi_strid_alloc(ddi_strid *strid, char *str)
6332 {
6333 	return (i_ddi_strid_alloc(strid, str));
6334 }
6335 
6336 /* return the id within the specified strid given the str */
6337 id_t
ddi_strid_str2id(ddi_strid * strid,char * str)6338 ddi_strid_str2id(ddi_strid *strid, char *str)
6339 {
6340 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6341 	id_t		id = 0;
6342 	mod_hash_val_t	hv;
6343 
6344 	ASSERT(ss && str);
6345 	if (ss && str && (mod_hash_find(ss->strid_bystr,
6346 	    (mod_hash_key_t)str, &hv) == 0))
6347 		id = (int)(intptr_t)hv;
6348 	return (id);
6349 }
6350 
6351 /* return str within the specified strid given the id */
6352 char *
ddi_strid_id2str(ddi_strid * strid,id_t id)6353 ddi_strid_id2str(ddi_strid *strid, id_t id)
6354 {
6355 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6356 	char		*str = NULL;
6357 	mod_hash_val_t	hv;
6358 
6359 	ASSERT(ss && id > 0);
6360 	if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6361 	    (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6362 		str = (char *)hv;
6363 	return (str);
6364 }
6365 
6366 /* free the id mapping within the specified strid */
6367 void
ddi_strid_free(ddi_strid * strid,id_t id)6368 ddi_strid_free(ddi_strid *strid, id_t id)
6369 {
6370 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6371 	char		*str;
6372 
6373 	ASSERT(ss && id > 0);
6374 	if ((ss == NULL) || (id <= 0))
6375 		return;
6376 
6377 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6378 	str = ddi_strid_id2str(strid, id);
6379 	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
6380 	id_free(ss->strid_space, id);
6381 
6382 	if (str)
6383 		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
6384 }
6385 
6386 /* destroy the strid set */
6387 void
ddi_strid_fini(ddi_strid ** strid_p)6388 ddi_strid_fini(ddi_strid **strid_p)
6389 {
6390 	i_ddi_strid	*ss;
6391 
6392 	ASSERT(strid_p);
6393 	if (strid_p == NULL)
6394 		return;
6395 
6396 	ss = (i_ddi_strid *)(*strid_p);
6397 	if (ss == NULL)
6398 		return;
6399 
6400 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6401 	if (ss->strid_byid)
6402 		mod_hash_destroy_hash(ss->strid_byid);
6403 	if (ss->strid_byid)
6404 		mod_hash_destroy_hash(ss->strid_bystr);
6405 	if (ss->strid_space)
6406 		id_space_destroy(ss->strid_space);
6407 	kmem_free(ss, sizeof (*ss));
6408 	*strid_p = NULL;
6409 }
6410 
6411 /*
6412  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6413  * Storage is double buffered to prevent updates during devi_addr use -
6414  * double buffering is adaquate for reliable ddi_deviname() consumption.
6415  * The double buffer is not freed until dev_info structure destruction
6416  * (by i_ddi_free_node).
6417  */
6418 void
ddi_set_name_addr(dev_info_t * dip,char * name)6419 ddi_set_name_addr(dev_info_t *dip, char *name)
6420 {
6421 	char	*buf = DEVI(dip)->devi_addr_buf;
6422 	char	*newaddr;
6423 
6424 	if (buf == NULL) {
6425 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6426 		DEVI(dip)->devi_addr_buf = buf;
6427 	}
6428 
6429 	if (name) {
6430 		ASSERT(strlen(name) < MAXNAMELEN);
6431 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6432 		    (buf + MAXNAMELEN) : buf;
6433 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6434 	} else
6435 		newaddr = NULL;
6436 
6437 	DEVI(dip)->devi_addr = newaddr;
6438 }
6439 
6440 char *
ddi_get_name_addr(dev_info_t * dip)6441 ddi_get_name_addr(dev_info_t *dip)
6442 {
6443 	return (DEVI(dip)->devi_addr);
6444 }
6445 
6446 void
ddi_set_parent_data(dev_info_t * dip,void * pd)6447 ddi_set_parent_data(dev_info_t *dip, void *pd)
6448 {
6449 	DEVI(dip)->devi_parent_data = pd;
6450 }
6451 
6452 void *
ddi_get_parent_data(dev_info_t * dip)6453 ddi_get_parent_data(dev_info_t *dip)
6454 {
6455 	return (DEVI(dip)->devi_parent_data);
6456 }
6457 
6458 /*
6459  * ddi_name_to_major: returns the major number of a named module,
6460  * derived from the current driver alias binding.
6461  *
6462  * Caveat: drivers should avoid the use of this function, in particular
6463  * together with ddi_get_name/ddi_binding name, as per
6464  *	major = ddi_name_to_major(ddi_get_name(devi));
6465  * ddi_name_to_major() relies on the state of the device/alias binding,
6466  * which can and does change dynamically as aliases are administered
6467  * over time.  An attached device instance cannot rely on the major
6468  * number returned by ddi_name_to_major() to match its own major number.
6469  *
6470  * For driver use, ddi_driver_major() reliably returns the major number
6471  * for the module to which the device was bound at attach time over
6472  * the life of the instance.
6473  *	major = ddi_driver_major(dev_info_t *)
6474  */
6475 major_t
ddi_name_to_major(const char * name)6476 ddi_name_to_major(const char *name)
6477 {
6478 	return (mod_name_to_major(name));
6479 }
6480 
6481 /*
6482  * ddi_major_to_name: Returns the module name bound to a major number.
6483  */
6484 char *
ddi_major_to_name(major_t major)6485 ddi_major_to_name(major_t major)
6486 {
6487 	return (mod_major_to_name(major));
6488 }
6489 
6490 /*
6491  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6492  * pointed at by 'name.'  A devinfo node is named as a result of calling
6493  * ddi_initchild().
6494  *
6495  * Note: the driver must be held before calling this function!
6496  */
6497 char *
ddi_deviname(dev_info_t * dip,char * name)6498 ddi_deviname(dev_info_t *dip, char *name)
6499 {
6500 	char *addrname;
6501 	char none = '\0';
6502 
6503 	if (dip == ddi_root_node()) {
6504 		*name = '\0';
6505 		return (name);
6506 	}
6507 
6508 	if (i_ddi_node_state(dip) < DS_BOUND) {
6509 		addrname = &none;
6510 	} else {
6511 		/*
6512 		 * Use ddi_get_name_addr() without checking state so we get
6513 		 * a unit-address if we are called after ddi_set_name_addr()
6514 		 * by nexus DDI_CTL_INITCHILD code, but before completing
6515 		 * node promotion to DS_INITIALIZED.  We currently have
6516 		 * two situations where we are called in this state:
6517 		 *   o  For framework processing of a path-oriented alias.
6518 		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6519 		 *	from it's tran_tgt_init(9E) implementation.
6520 		 */
6521 		addrname = ddi_get_name_addr(dip);
6522 		if (addrname == NULL)
6523 			addrname = &none;
6524 	}
6525 
6526 	if (*addrname == '\0') {
6527 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6528 	} else {
6529 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6530 	}
6531 
6532 	return (name);
6533 }
6534 
6535 /*
6536  * Spits out the name of device node, typically name@addr, for a given node,
6537  * using the driver name, not the nodename.
6538  *
6539  * Used by match_parent. Not to be used elsewhere.
6540  */
6541 char *
i_ddi_parname(dev_info_t * dip,char * name)6542 i_ddi_parname(dev_info_t *dip, char *name)
6543 {
6544 	char *addrname;
6545 
6546 	if (dip == ddi_root_node()) {
6547 		*name = '\0';
6548 		return (name);
6549 	}
6550 
6551 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6552 
6553 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6554 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6555 	else
6556 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6557 	return (name);
6558 }
6559 
6560 static char *
pathname_work(dev_info_t * dip,char * path)6561 pathname_work(dev_info_t *dip, char *path)
6562 {
6563 	char *bp;
6564 
6565 	if (dip == ddi_root_node()) {
6566 		*path = '\0';
6567 		return (path);
6568 	}
6569 	(void) pathname_work(ddi_get_parent(dip), path);
6570 	bp = path + strlen(path);
6571 	(void) ddi_deviname(dip, bp);
6572 	return (path);
6573 }
6574 
6575 char *
ddi_pathname(dev_info_t * dip,char * path)6576 ddi_pathname(dev_info_t *dip, char *path)
6577 {
6578 	return (pathname_work(dip, path));
6579 }
6580 
6581 char *
ddi_pathname_minor(struct ddi_minor_data * dmdp,char * path)6582 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6583 {
6584 	if (dmdp->dip == NULL)
6585 		*path = '\0';
6586 	else {
6587 		(void) ddi_pathname(dmdp->dip, path);
6588 		if (dmdp->ddm_name) {
6589 			(void) strcat(path, ":");
6590 			(void) strcat(path, dmdp->ddm_name);
6591 		}
6592 	}
6593 	return (path);
6594 }
6595 
6596 static char *
pathname_work_obp(dev_info_t * dip,char * path)6597 pathname_work_obp(dev_info_t *dip, char *path)
6598 {
6599 	char *bp;
6600 	char *obp_path;
6601 
6602 	/*
6603 	 * look up the "obp-path" property, return the path if it exists
6604 	 */
6605 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6606 	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
6607 		(void) strcpy(path, obp_path);
6608 		ddi_prop_free(obp_path);
6609 		return (path);
6610 	}
6611 
6612 	/*
6613 	 * stop at root, no obp path
6614 	 */
6615 	if (dip == ddi_root_node()) {
6616 		return (NULL);
6617 	}
6618 
6619 	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
6620 	if (obp_path == NULL)
6621 		return (NULL);
6622 
6623 	/*
6624 	 * append our component to parent's obp path
6625 	 */
6626 	bp = path + strlen(path);
6627 	if (*(bp - 1) != '/')
6628 		(void) strcat(bp++, "/");
6629 	(void) ddi_deviname(dip, bp);
6630 	return (path);
6631 }
6632 
6633 /*
6634  * return the 'obp-path' based path for the given node, or NULL if the node
6635  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6636  * function can't be called from interrupt context (since we need to
6637  * lookup a string property).
6638  */
6639 char *
ddi_pathname_obp(dev_info_t * dip,char * path)6640 ddi_pathname_obp(dev_info_t *dip, char *path)
6641 {
6642 	ASSERT(!servicing_interrupt());
6643 	if (dip == NULL || path == NULL)
6644 		return (NULL);
6645 
6646 	/* split work into a separate function to aid debugging */
6647 	return (pathname_work_obp(dip, path));
6648 }
6649 
6650 int
ddi_pathname_obp_set(dev_info_t * dip,char * component)6651 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6652 {
6653 	dev_info_t *pdip;
6654 	char *obp_path = NULL;
6655 	int rc = DDI_FAILURE;
6656 
6657 	if (dip == NULL)
6658 		return (DDI_FAILURE);
6659 
6660 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6661 
6662 	pdip = ddi_get_parent(dip);
6663 
6664 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6665 		(void) ddi_pathname(pdip, obp_path);
6666 	}
6667 
6668 	if (component) {
6669 		(void) strncat(obp_path, "/", MAXPATHLEN);
6670 		(void) strncat(obp_path, component, MAXPATHLEN);
6671 	}
6672 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6673 	    obp_path);
6674 
6675 	if (obp_path)
6676 		kmem_free(obp_path, MAXPATHLEN);
6677 
6678 	return (rc);
6679 }
6680 
6681 /*
6682  * Given a dev_t, return the pathname of the corresponding device in the
6683  * buffer pointed at by "path."  The buffer is assumed to be large enough
6684  * to hold the pathname of the device (MAXPATHLEN).
6685  *
6686  * The pathname of a device is the pathname of the devinfo node to which
6687  * the device "belongs," concatenated with the character ':' and the name
6688  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6689  * just the pathname of the devinfo node is returned without driving attach
6690  * of that node.  For a non-zero spec_type, an attach is performed and a
6691  * search of the minor list occurs.
6692  *
6693  * It is possible that the path associated with the dev_t is not
6694  * currently available in the devinfo tree.  In order to have a
6695  * dev_t, a device must have been discovered before, which means
6696  * that the path is always in the instance tree.  The one exception
6697  * to this is if the dev_t is associated with a pseudo driver, in
6698  * which case the device must exist on the pseudo branch of the
6699  * devinfo tree as a result of parsing .conf files.
6700  */
6701 int
ddi_dev_pathname(dev_t devt,int spec_type,char * path)6702 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
6703 {
6704 	major_t		major = getmajor(devt);
6705 	int		instance;
6706 	dev_info_t	*dip;
6707 	char		*minorname;
6708 	char		*drvname;
6709 
6710 	if (major >= devcnt)
6711 		goto fail;
6712 	if (major == clone_major) {
6713 		/* clone has no minor nodes, manufacture the path here */
6714 		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
6715 			goto fail;
6716 
6717 		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
6718 		return (DDI_SUCCESS);
6719 	}
6720 
6721 	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6722 	if ((instance = dev_to_instance(devt)) == -1)
6723 		goto fail;
6724 
6725 	/* reconstruct the path given the major/instance */
6726 	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
6727 		goto fail;
6728 
6729 	/* if spec_type given we must drive attach and search minor nodes */
6730 	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
6731 		/* attach the path so we can search minors */
6732 		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
6733 			goto fail;
6734 
6735 		/* Add minorname to path. */
6736 		ndi_devi_enter(dip);
6737 		minorname = i_ddi_devtspectype_to_minorname(dip,
6738 		    devt, spec_type);
6739 		if (minorname) {
6740 			(void) strcat(path, ":");
6741 			(void) strcat(path, minorname);
6742 		}
6743 		ndi_devi_exit(dip);
6744 		ddi_release_devi(dip);
6745 		if (minorname == NULL)
6746 			goto fail;
6747 	}
6748 	ASSERT(strlen(path) < MAXPATHLEN);
6749 	return (DDI_SUCCESS);
6750 
6751 fail:	*path = 0;
6752 	return (DDI_FAILURE);
6753 }
6754 
6755 /*
6756  * Given a major number and an instance, return the path.
6757  * This interface does NOT drive attach.
6758  */
6759 int
e_ddi_majorinstance_to_path(major_t major,int instance,char * path)6760 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6761 {
6762 	struct devnames *dnp;
6763 	dev_info_t	*dip;
6764 
6765 	if ((major >= devcnt) || (instance == -1)) {
6766 		*path = 0;
6767 		return (DDI_FAILURE);
6768 	}
6769 
6770 	/* look for the major/instance in the instance tree */
6771 	if (e_ddi_instance_majorinstance_to_path(major, instance,
6772 	    path) == DDI_SUCCESS) {
6773 		ASSERT(strlen(path) < MAXPATHLEN);
6774 		return (DDI_SUCCESS);
6775 	}
6776 
6777 	/*
6778 	 * Not in instance tree, find the instance on the per driver list and
6779 	 * construct path to instance via ddi_pathname(). This is how paths
6780 	 * down the 'pseudo' branch are constructed.
6781 	 */
6782 	dnp = &(devnamesp[major]);
6783 	LOCK_DEV_OPS(&(dnp->dn_lock));
6784 	for (dip = dnp->dn_head; dip;
6785 	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
6786 		/* Skip if instance does not match. */
6787 		if (DEVI(dip)->devi_instance != instance)
6788 			continue;
6789 
6790 		/*
6791 		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6792 		 * node demotion, so it is not an effective way of ensuring
6793 		 * that the ddi_pathname result has a unit-address.  Instead,
6794 		 * we reverify the node state after calling ddi_pathname().
6795 		 */
6796 		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
6797 			(void) ddi_pathname(dip, path);
6798 			if (i_ddi_node_state(dip) < DS_INITIALIZED)
6799 				continue;
6800 			UNLOCK_DEV_OPS(&(dnp->dn_lock));
6801 			ASSERT(strlen(path) < MAXPATHLEN);
6802 			return (DDI_SUCCESS);
6803 		}
6804 	}
6805 	UNLOCK_DEV_OPS(&(dnp->dn_lock));
6806 
6807 	/* can't reconstruct the path */
6808 	*path = 0;
6809 	return (DDI_FAILURE);
6810 }
6811 
6812 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6813 
6814 /*
6815  * Given the dip for a network interface return the ppa for that interface.
6816  *
6817  * In all cases except GLD v0 drivers, the ppa == instance.
6818  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6819  * So for these drivers when the attach routine calls gld_register(),
6820  * the GLD framework creates an integer property called "gld_driver_ppa"
6821  * that can be queried here.
6822  *
6823  * The only time this function is used is when a system is booting over nfs.
6824  * In this case the system has to resolve the pathname of the boot device
6825  * to it's ppa.
6826  */
6827 int
i_ddi_devi_get_ppa(dev_info_t * dip)6828 i_ddi_devi_get_ppa(dev_info_t *dip)
6829 {
6830 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6831 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6832 	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
6833 }
6834 
6835 /*
6836  * i_ddi_devi_set_ppa() should only be called from gld_register()
6837  * and only for GLD v0 drivers
6838  */
6839 void
i_ddi_devi_set_ppa(dev_info_t * dip,int ppa)6840 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6841 {
6842 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6843 }
6844 
6845 
6846 /*
6847  * Private DDI Console bell functions.
6848  */
6849 void
ddi_ring_console_bell(clock_t duration)6850 ddi_ring_console_bell(clock_t duration)
6851 {
6852 	if (ddi_console_bell_func != NULL)
6853 		(*ddi_console_bell_func)(duration);
6854 }
6855 
6856 void
ddi_set_console_bell(void (* bellfunc)(clock_t duration))6857 ddi_set_console_bell(void (*bellfunc)(clock_t duration))
6858 {
6859 	ddi_console_bell_func = bellfunc;
6860 }
6861 
6862 int
ddi_dma_alloc_handle(dev_info_t * dip,ddi_dma_attr_t * attr,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)6863 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6864     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6865 {
6866 	int (*funcp)() = ddi_dma_allochdl;
6867 	ddi_dma_attr_t dma_attr;
6868 	struct bus_ops *bop;
6869 
6870 	if (attr == (ddi_dma_attr_t *)0)
6871 		return (DDI_DMA_BADATTR);
6872 
6873 	dma_attr = *attr;
6874 
6875 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6876 	if (bop && bop->bus_dma_allochdl)
6877 		funcp = bop->bus_dma_allochdl;
6878 
6879 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6880 }
6881 
6882 void
ddi_dma_free_handle(ddi_dma_handle_t * handlep)6883 ddi_dma_free_handle(ddi_dma_handle_t *handlep)
6884 {
6885 	ddi_dma_handle_t h = *handlep;
6886 	(void) ddi_dma_freehdl(HD, HD, h);
6887 }
6888 
6889 static uintptr_t dma_mem_list_id = 0;
6890 
6891 
6892 int
ddi_dma_mem_alloc(ddi_dma_handle_t handle,size_t length,ddi_device_acc_attr_t * accattrp,uint_t flags,int (* waitfp)(caddr_t),caddr_t arg,caddr_t * kaddrp,size_t * real_length,ddi_acc_handle_t * handlep)6893 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6894     ddi_device_acc_attr_t *accattrp, uint_t flags,
6895     int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6896     size_t *real_length, ddi_acc_handle_t *handlep)
6897 {
6898 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6899 	dev_info_t *dip = hp->dmai_rdip;
6900 	ddi_acc_hdl_t *ap;
6901 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6902 	uint_t sleepflag, xfermodes;
6903 	int (*fp)(caddr_t);
6904 	int rval;
6905 
6906 	if (waitfp == DDI_DMA_SLEEP)
6907 		fp = (int (*)())KM_SLEEP;
6908 	else if (waitfp == DDI_DMA_DONTWAIT)
6909 		fp = (int (*)())KM_NOSLEEP;
6910 	else
6911 		fp = waitfp;
6912 	*handlep = impl_acc_hdl_alloc(fp, arg);
6913 	if (*handlep == NULL)
6914 		return (DDI_FAILURE);
6915 
6916 	/* check if the cache attributes are supported */
6917 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
6918 		return (DDI_FAILURE);
6919 
6920 	/*
6921 	 * Transfer the meaningful bits to xfermodes.
6922 	 * Double-check if the 3rd party driver correctly sets the bits.
6923 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
6924 	 */
6925 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6926 	if (xfermodes == 0) {
6927 		xfermodes = DDI_DMA_STREAMING;
6928 	}
6929 
6930 	/*
6931 	 * initialize the common elements of data access handle
6932 	 */
6933 	ap = impl_acc_hdl_get(*handlep);
6934 	ap->ah_vers = VERS_ACCHDL;
6935 	ap->ah_dip = dip;
6936 	ap->ah_offset = 0;
6937 	ap->ah_len = 0;
6938 	ap->ah_xfermodes = flags;
6939 	ap->ah_acc = *accattrp;
6940 
6941 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
6942 	if (xfermodes == DDI_DMA_CONSISTENT) {
6943 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6944 		    flags, accattrp, kaddrp, NULL, ap);
6945 		*real_length = length;
6946 	} else {
6947 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6948 		    flags, accattrp, kaddrp, real_length, ap);
6949 	}
6950 	if (rval == DDI_SUCCESS) {
6951 		ap->ah_len = (off_t)(*real_length);
6952 		ap->ah_addr = *kaddrp;
6953 	} else {
6954 		impl_acc_hdl_free(*handlep);
6955 		*handlep = (ddi_acc_handle_t)NULL;
6956 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
6957 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
6958 		}
6959 		rval = DDI_FAILURE;
6960 	}
6961 	return (rval);
6962 }
6963 
6964 void
ddi_dma_mem_free(ddi_acc_handle_t * handlep)6965 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6966 {
6967 	ddi_acc_hdl_t *ap;
6968 
6969 	ap = impl_acc_hdl_get(*handlep);
6970 	ASSERT(ap);
6971 
6972 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
6973 
6974 	/*
6975 	 * free the handle
6976 	 */
6977 	impl_acc_hdl_free(*handlep);
6978 	*handlep = (ddi_acc_handle_t)NULL;
6979 
6980 	if (dma_mem_list_id != 0) {
6981 		ddi_run_callback(&dma_mem_list_id);
6982 	}
6983 }
6984 
6985 int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle,struct buf * bp,uint_t flags,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_cookie_t * cookiep,uint_t * ccountp)6986 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
6987     uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
6988     ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6989 {
6990 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6991 	dev_info_t *dip, *rdip;
6992 	struct ddi_dma_req dmareq;
6993 	int (*funcp)();
6994 	ddi_dma_cookie_t cookie;
6995 	uint_t count;
6996 
6997 	if (cookiep == NULL)
6998 		cookiep = &cookie;
6999 
7000 	if (ccountp == NULL)
7001 		ccountp = &count;
7002 
7003 	dmareq.dmar_flags = flags;
7004 	dmareq.dmar_fp = waitfp;
7005 	dmareq.dmar_arg = arg;
7006 	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;
7007 
7008 	if (bp->b_flags & B_PAGEIO) {
7009 		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
7010 		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
7011 		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
7012 		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
7013 	} else {
7014 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
7015 		if (bp->b_flags & B_SHADOW) {
7016 			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
7017 			    bp->b_shadow;
7018 			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
7019 		} else {
7020 			dmareq.dmar_object.dmao_type =
7021 			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
7022 			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
7023 			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7024 		}
7025 
7026 		/*
7027 		 * If the buffer has no proc pointer, or the proc
7028 		 * struct has the kernel address space, or the buffer has
7029 		 * been marked B_REMAPPED (meaning that it is now
7030 		 * mapped into the kernel's address space), then
7031 		 * the address space is kas (kernel address space).
7032 		 */
7033 		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
7034 		    (bp->b_flags & B_REMAPPED)) {
7035 			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
7036 		} else {
7037 			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
7038 			    bp->b_proc->p_as;
7039 		}
7040 	}
7041 
7042 	dip = rdip = hp->dmai_rdip;
7043 	if (dip != ddi_root_node())
7044 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7045 	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7046 	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7047 }
7048 
7049 int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle,struct as * as,caddr_t addr,size_t len,uint_t flags,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_cookie_t * cookiep,uint_t * ccountp)7050 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7051     caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7052     caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7053 {
7054 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7055 	dev_info_t *dip, *rdip;
7056 	struct ddi_dma_req dmareq;
7057 	int (*funcp)();
7058 	ddi_dma_cookie_t cookie;
7059 	uint_t count;
7060 
7061 	if (len == (uint_t)0) {
7062 		return (DDI_DMA_NOMAPPING);
7063 	}
7064 
7065 	if (cookiep == NULL)
7066 		cookiep = &cookie;
7067 
7068 	if (ccountp == NULL)
7069 		ccountp = &count;
7070 
7071 	dmareq.dmar_flags = flags;
7072 	dmareq.dmar_fp = waitfp;
7073 	dmareq.dmar_arg = arg;
7074 	dmareq.dmar_object.dmao_size = len;
7075 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7076 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7077 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7078 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7079 
7080 	dip = rdip = hp->dmai_rdip;
7081 	if (dip != ddi_root_node())
7082 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7083 	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7084 	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7085 }
7086 
7087 void
ddi_dma_nextcookie(ddi_dma_handle_t handle,ddi_dma_cookie_t * cookiep)7088 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7089 {
7090 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7091 	ddi_dma_cookie_t *cp;
7092 
7093 	if (hp->dmai_curcookie >= hp->dmai_ncookies) {
7094 		panic("ddi_dma_nextcookie() called too many times on handle %p",
7095 		    hp);
7096 	}
7097 
7098 	cp = hp->dmai_cookie;
7099 	ASSERT(cp);
7100 
7101 	cookiep->dmac_notused = cp->dmac_notused;
7102 	cookiep->dmac_type = cp->dmac_type;
7103 	cookiep->dmac_address = cp->dmac_address;
7104 	cookiep->dmac_size = cp->dmac_size;
7105 	hp->dmai_cookie++;
7106 	hp->dmai_curcookie++;
7107 }
7108 
7109 int
ddi_dma_ncookies(ddi_dma_handle_t handle)7110 ddi_dma_ncookies(ddi_dma_handle_t handle)
7111 {
7112 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7113 
7114 	return (hp->dmai_ncookies);
7115 }
7116 
7117 const ddi_dma_cookie_t *
ddi_dma_cookie_iter(ddi_dma_handle_t handle,const ddi_dma_cookie_t * iter)7118 ddi_dma_cookie_iter(ddi_dma_handle_t handle, const ddi_dma_cookie_t *iter)
7119 {
7120 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7121 	const ddi_dma_cookie_t *base, *end;
7122 
7123 	if (hp->dmai_ncookies == 0) {
7124 		return (NULL);
7125 	}
7126 
7127 	base = hp->dmai_cookie - hp->dmai_curcookie;
7128 	end = base + hp->dmai_ncookies;
7129 	if (iter == NULL) {
7130 		return (base);
7131 	}
7132 
7133 	if ((uintptr_t)iter < (uintptr_t)base ||
7134 	    (uintptr_t)iter >= (uintptr_t)end) {
7135 		return (NULL);
7136 	}
7137 
7138 	iter++;
7139 	if (iter == end) {
7140 		return (NULL);
7141 	}
7142 
7143 	return (iter);
7144 }
7145 
7146 const ddi_dma_cookie_t *
ddi_dma_cookie_get(ddi_dma_handle_t handle,uint_t index)7147 ddi_dma_cookie_get(ddi_dma_handle_t handle, uint_t index)
7148 {
7149 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7150 	const ddi_dma_cookie_t *base;
7151 
7152 	if (index >= hp->dmai_ncookies) {
7153 		return (NULL);
7154 	}
7155 
7156 	base = hp->dmai_cookie - hp->dmai_curcookie;
7157 	return (base + index);
7158 }
7159 
7160 const ddi_dma_cookie_t *
ddi_dma_cookie_one(ddi_dma_handle_t handle)7161 ddi_dma_cookie_one(ddi_dma_handle_t handle)
7162 {
7163 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7164 	const ddi_dma_cookie_t *base;
7165 
7166 	if (hp->dmai_ncookies != 1) {
7167 		panic("ddi_dma_cookie_one() called with improper handle %p",
7168 		    hp);
7169 	}
7170 	ASSERT3P(hp->dmai_cookie, !=, NULL);
7171 
7172 	base = hp->dmai_cookie - hp->dmai_curcookie;
7173 	return (base);
7174 }
7175 
7176 int
ddi_dma_numwin(ddi_dma_handle_t handle,uint_t * nwinp)7177 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7178 {
7179 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7180 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7181 		return (DDI_FAILURE);
7182 	} else {
7183 		*nwinp = hp->dmai_nwin;
7184 		return (DDI_SUCCESS);
7185 	}
7186 }
7187 
7188 int
ddi_dma_getwin(ddi_dma_handle_t h,uint_t win,off_t * offp,size_t * lenp,ddi_dma_cookie_t * cookiep,uint_t * ccountp)7189 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7190     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7191 {
7192 	int (*funcp)() = ddi_dma_win;
7193 	struct bus_ops *bop;
7194 	ddi_dma_cookie_t cookie;
7195 	uint_t count;
7196 
7197 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
7198 	if (bop && bop->bus_dma_win)
7199 		funcp = bop->bus_dma_win;
7200 
7201 	if (cookiep == NULL)
7202 		cookiep = &cookie;
7203 
7204 	if (ccountp == NULL)
7205 		ccountp = &count;
7206 
7207 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7208 }
7209 
7210 int
ddi_dma_set_sbus64(ddi_dma_handle_t h,ulong_t burstsizes)7211 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
7212 {
7213 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
7214 	    &burstsizes, 0, 0));
7215 }
7216 
7217 int
i_ddi_dma_fault_check(ddi_dma_impl_t * hp)7218 i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
7219 {
7220 	return (hp->dmai_fault);
7221 }
7222 
7223 int
ddi_check_dma_handle(ddi_dma_handle_t handle)7224 ddi_check_dma_handle(ddi_dma_handle_t handle)
7225 {
7226 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7227 	int (*check)(ddi_dma_impl_t *);
7228 
7229 	if ((check = hp->dmai_fault_check) == NULL)
7230 		check = i_ddi_dma_fault_check;
7231 
7232 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7233 }
7234 
7235 void
i_ddi_dma_set_fault(ddi_dma_handle_t handle)7236 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7237 {
7238 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7239 	void (*notify)(ddi_dma_impl_t *);
7240 
7241 	if (!hp->dmai_fault) {
7242 		hp->dmai_fault = 1;
7243 		if ((notify = hp->dmai_fault_notify) != NULL)
7244 			(*notify)(hp);
7245 	}
7246 }
7247 
7248 void
i_ddi_dma_clr_fault(ddi_dma_handle_t handle)7249 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7250 {
7251 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7252 	void (*notify)(ddi_dma_impl_t *);
7253 
7254 	if (hp->dmai_fault) {
7255 		hp->dmai_fault = 0;
7256 		if ((notify = hp->dmai_fault_notify) != NULL)
7257 			(*notify)(hp);
7258 	}
7259 }
7260 
7261 /*
7262  * register mapping routines.
7263  */
7264 int
ddi_regs_map_setup(dev_info_t * dip,uint_t rnumber,caddr_t * addrp,offset_t offset,offset_t len,ddi_device_acc_attr_t * accattrp,ddi_acc_handle_t * handle)7265 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7266     offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7267     ddi_acc_handle_t *handle)
7268 {
7269 	ddi_map_req_t mr;
7270 	ddi_acc_hdl_t *hp;
7271 	int result;
7272 
7273 	/*
7274 	 * Allocate and initialize the common elements of data access handle.
7275 	 */
7276 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7277 	hp = impl_acc_hdl_get(*handle);
7278 	hp->ah_vers = VERS_ACCHDL;
7279 	hp->ah_dip = dip;
7280 	hp->ah_rnumber = rnumber;
7281 	hp->ah_offset = offset;
7282 	hp->ah_len = len;
7283 	hp->ah_acc = *accattrp;
7284 
7285 	/*
7286 	 * Set up the mapping request and call to parent.
7287 	 */
7288 	mr.map_op = DDI_MO_MAP_LOCKED;
7289 	mr.map_type = DDI_MT_RNUMBER;
7290 	mr.map_obj.rnumber = rnumber;
7291 	mr.map_prot = PROT_READ | PROT_WRITE;
7292 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7293 	mr.map_handlep = hp;
7294 	mr.map_vers = DDI_MAP_VERSION;
7295 	result = ddi_map(dip, &mr, offset, len, addrp);
7296 
7297 	/*
7298 	 * check for end result
7299 	 */
7300 	if (result != DDI_SUCCESS) {
7301 		impl_acc_hdl_free(*handle);
7302 		*handle = (ddi_acc_handle_t)NULL;
7303 	} else {
7304 		hp->ah_addr = *addrp;
7305 	}
7306 
7307 	return (result);
7308 }
7309 
7310 void
ddi_regs_map_free(ddi_acc_handle_t * handlep)7311 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7312 {
7313 	ddi_map_req_t mr;
7314 	ddi_acc_hdl_t *hp;
7315 
7316 	hp = impl_acc_hdl_get(*handlep);
7317 	ASSERT(hp);
7318 
7319 	mr.map_op = DDI_MO_UNMAP;
7320 	mr.map_type = DDI_MT_RNUMBER;
7321 	mr.map_obj.rnumber = hp->ah_rnumber;
7322 	mr.map_prot = PROT_READ | PROT_WRITE;
7323 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7324 	mr.map_handlep = hp;
7325 	mr.map_vers = DDI_MAP_VERSION;
7326 
7327 	/*
7328 	 * Call my parent to unmap my regs.
7329 	 */
7330 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7331 	    hp->ah_len, &hp->ah_addr);
7332 	/*
7333 	 * free the handle
7334 	 */
7335 	impl_acc_hdl_free(*handlep);
7336 	*handlep = (ddi_acc_handle_t)NULL;
7337 }
7338 
7339 int
ddi_device_zero(ddi_acc_handle_t handle,caddr_t dev_addr,size_t bytecount,ssize_t dev_advcnt,uint_t dev_datasz)7340 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7341     ssize_t dev_advcnt, uint_t dev_datasz)
7342 {
7343 	uint8_t *b;
7344 	uint16_t *w;
7345 	uint32_t *l;
7346 	uint64_t *ll;
7347 
7348 	/* check for total byte count is multiple of data transfer size */
7349 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7350 		return (DDI_FAILURE);
7351 
7352 	switch (dev_datasz) {
7353 	case DDI_DATA_SZ01_ACC:
7354 		for (b = (uint8_t *)dev_addr;
7355 		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
7356 			ddi_put8(handle, b, 0);
7357 		break;
7358 	case DDI_DATA_SZ02_ACC:
7359 		for (w = (uint16_t *)dev_addr;
7360 		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
7361 			ddi_put16(handle, w, 0);
7362 		break;
7363 	case DDI_DATA_SZ04_ACC:
7364 		for (l = (uint32_t *)dev_addr;
7365 		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
7366 			ddi_put32(handle, l, 0);
7367 		break;
7368 	case DDI_DATA_SZ08_ACC:
7369 		for (ll = (uint64_t *)dev_addr;
7370 		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7371 			ddi_put64(handle, ll, 0x0ll);
7372 		break;
7373 	default:
7374 		return (DDI_FAILURE);
7375 	}
7376 	return (DDI_SUCCESS);
7377 }
7378 
7379 int
ddi_device_copy(ddi_acc_handle_t src_handle,caddr_t src_addr,ssize_t src_advcnt,ddi_acc_handle_t dest_handle,caddr_t dest_addr,ssize_t dest_advcnt,size_t bytecount,uint_t dev_datasz)7380 ddi_device_copy(
7381 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7382 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7383 	size_t bytecount, uint_t dev_datasz)
7384 {
7385 	uint8_t *b_src, *b_dst;
7386 	uint16_t *w_src, *w_dst;
7387 	uint32_t *l_src, *l_dst;
7388 	uint64_t *ll_src, *ll_dst;
7389 
7390 	/* check for total byte count is multiple of data transfer size */
7391 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7392 		return (DDI_FAILURE);
7393 
7394 	switch (dev_datasz) {
7395 	case DDI_DATA_SZ01_ACC:
7396 		b_src = (uint8_t *)src_addr;
7397 		b_dst = (uint8_t *)dest_addr;
7398 
7399 		for (; bytecount != 0; bytecount -= 1) {
7400 			ddi_put8(dest_handle, b_dst,
7401 			    ddi_get8(src_handle, b_src));
7402 			b_dst += dest_advcnt;
7403 			b_src += src_advcnt;
7404 		}
7405 		break;
7406 	case DDI_DATA_SZ02_ACC:
7407 		w_src = (uint16_t *)src_addr;
7408 		w_dst = (uint16_t *)dest_addr;
7409 
7410 		for (; bytecount != 0; bytecount -= 2) {
7411 			ddi_put16(dest_handle, w_dst,
7412 			    ddi_get16(src_handle, w_src));
7413 			w_dst += dest_advcnt;
7414 			w_src += src_advcnt;
7415 		}
7416 		break;
7417 	case DDI_DATA_SZ04_ACC:
7418 		l_src = (uint32_t *)src_addr;
7419 		l_dst = (uint32_t *)dest_addr;
7420 
7421 		for (; bytecount != 0; bytecount -= 4) {
7422 			ddi_put32(dest_handle, l_dst,
7423 			    ddi_get32(src_handle, l_src));
7424 			l_dst += dest_advcnt;
7425 			l_src += src_advcnt;
7426 		}
7427 		break;
7428 	case DDI_DATA_SZ08_ACC:
7429 		ll_src = (uint64_t *)src_addr;
7430 		ll_dst = (uint64_t *)dest_addr;
7431 
7432 		for (; bytecount != 0; bytecount -= 8) {
7433 			ddi_put64(dest_handle, ll_dst,
7434 			    ddi_get64(src_handle, ll_src));
7435 			ll_dst += dest_advcnt;
7436 			ll_src += src_advcnt;
7437 		}
7438 		break;
7439 	default:
7440 		return (DDI_FAILURE);
7441 	}
7442 	return (DDI_SUCCESS);
7443 }
7444 
7445 #define	swap16(value)  \
7446 	((((value) & 0xff) << 8) | ((value) >> 8))
7447 
7448 #define	swap32(value)	\
7449 	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
7450 	(uint32_t)swap16((uint16_t)((value) >> 16)))
7451 
7452 #define	swap64(value)	\
7453 	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
7454 	    << 32) | \
7455 	(uint64_t)swap32((uint32_t)((value) >> 32)))
7456 
7457 uint16_t
ddi_swap16(uint16_t value)7458 ddi_swap16(uint16_t value)
7459 {
7460 	return (swap16(value));
7461 }
7462 
7463 uint32_t
ddi_swap32(uint32_t value)7464 ddi_swap32(uint32_t value)
7465 {
7466 	return (swap32(value));
7467 }
7468 
7469 uint64_t
ddi_swap64(uint64_t value)7470 ddi_swap64(uint64_t value)
7471 {
7472 	return (swap64(value));
7473 }
7474 
7475 /*
7476  * Convert a binding name to a driver name.
7477  * A binding name is the name used to determine the driver for a
7478  * device - it may be either an alias for the driver or the name
7479  * of the driver itself.
7480  */
7481 char *
i_binding_to_drv_name(char * bname)7482 i_binding_to_drv_name(char *bname)
7483 {
7484 	major_t major_no;
7485 
7486 	ASSERT(bname != NULL);
7487 
7488 	if ((major_no = ddi_name_to_major(bname)) == -1)
7489 		return (NULL);
7490 	return (ddi_major_to_name(major_no));
7491 }
7492 
7493 /*
7494  * Search for minor name that has specified dev_t and spec_type.
7495  * If spec_type is zero then any dev_t match works.  Since we
7496  * are returning a pointer to the minor name string, we require the
7497  * caller to do the locking.
7498  */
7499 char *
i_ddi_devtspectype_to_minorname(dev_info_t * dip,dev_t dev,int spec_type)7500 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7501 {
7502 	struct ddi_minor_data	*dmdp;
7503 
7504 	/*
7505 	 * The did layered driver currently intentionally returns a
7506 	 * devinfo ptr for an underlying sd instance based on a did
7507 	 * dev_t. In this case it is not an error.
7508 	 *
7509 	 * The did layered driver is associated with Sun Cluster.
7510 	 */
7511 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7512 	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7513 
7514 	ASSERT(DEVI_BUSY_OWNED(dip));
7515 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7516 		if (((dmdp->type == DDM_MINOR) ||
7517 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7518 		    (dmdp->type == DDM_DEFAULT)) &&
7519 		    (dmdp->ddm_dev == dev) &&
7520 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7521 		    (dmdp->ddm_spec_type == spec_type)))
7522 			return (dmdp->ddm_name);
7523 	}
7524 
7525 	return (NULL);
7526 }
7527 
7528 /*
7529  * Find the devt and spectype of the specified minor_name.
7530  * Return DDI_FAILURE if minor_name not found. Since we are
7531  * returning everything via arguments we can do the locking.
7532  */
7533 int
i_ddi_minorname_to_devtspectype(dev_info_t * dip,const char * minor_name,dev_t * devtp,int * spectypep)7534 i_ddi_minorname_to_devtspectype(dev_info_t *dip, const char *minor_name,
7535     dev_t *devtp, int *spectypep)
7536 {
7537 	struct ddi_minor_data	*dmdp;
7538 
7539 	/* deal with clone minor nodes */
7540 	if (dip == clone_dip) {
7541 		major_t	major;
7542 		/*
7543 		 * Make sure minor_name is a STREAMS driver.
7544 		 * We load the driver but don't attach to any instances.
7545 		 */
7546 
7547 		major = ddi_name_to_major(minor_name);
7548 		if (major == DDI_MAJOR_T_NONE)
7549 			return (DDI_FAILURE);
7550 
7551 		if (ddi_hold_driver(major) == NULL)
7552 			return (DDI_FAILURE);
7553 
7554 		if (STREAMSTAB(major) == NULL) {
7555 			ddi_rele_driver(major);
7556 			return (DDI_FAILURE);
7557 		}
7558 		ddi_rele_driver(major);
7559 
7560 		if (devtp)
7561 			*devtp = makedevice(clone_major, (minor_t)major);
7562 
7563 		if (spectypep)
7564 			*spectypep = S_IFCHR;
7565 
7566 		return (DDI_SUCCESS);
7567 	}
7568 
7569 	ndi_devi_enter(dip);
7570 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7571 		if (((dmdp->type != DDM_MINOR) &&
7572 		    (dmdp->type != DDM_INTERNAL_PATH) &&
7573 		    (dmdp->type != DDM_DEFAULT)) ||
7574 		    strcmp(minor_name, dmdp->ddm_name))
7575 			continue;
7576 
7577 		if (devtp)
7578 			*devtp = dmdp->ddm_dev;
7579 
7580 		if (spectypep)
7581 			*spectypep = dmdp->ddm_spec_type;
7582 
7583 		ndi_devi_exit(dip);
7584 		return (DDI_SUCCESS);
7585 	}
7586 	ndi_devi_exit(dip);
7587 
7588 	return (DDI_FAILURE);
7589 }
7590 
7591 static kmutex_t devid_gen_mutex;
7592 static short	devid_gen_number;
7593 
7594 #ifdef DEBUG
7595 
7596 static int	devid_register_corrupt = 0;
7597 static int	devid_register_corrupt_major = 0;
7598 static int	devid_register_corrupt_hint = 0;
7599 static int	devid_register_corrupt_hint_major = 0;
7600 
7601 static int devid_lyr_debug = 0;
7602 
7603 #define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
7604 	if (devid_lyr_debug)					\
7605 		ddi_debug_devid_devts(msg, ndevs, devs)
7606 
7607 #else
7608 
7609 #define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7610 
7611 #endif /* DEBUG */
7612 
7613 
7614 #ifdef	DEBUG
7615 
7616 static void
ddi_debug_devid_devts(char * msg,int ndevs,dev_t * devs)7617 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7618 {
7619 	int i;
7620 
7621 	cmn_err(CE_CONT, "%s:\n", msg);
7622 	for (i = 0; i < ndevs; i++) {
7623 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7624 	}
7625 }
7626 
7627 static void
ddi_debug_devid_paths(char * msg,int npaths,char ** paths)7628 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7629 {
7630 	int i;
7631 
7632 	cmn_err(CE_CONT, "%s:\n", msg);
7633 	for (i = 0; i < npaths; i++) {
7634 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7635 	}
7636 }
7637 
7638 static void
ddi_debug_devid_devts_per_path(char * path,int ndevs,dev_t * devs)7639 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7640 {
7641 	int i;
7642 
7643 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7644 	for (i = 0; i < ndevs; i++) {
7645 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7646 	}
7647 }
7648 
7649 #endif	/* DEBUG */
7650 
7651 /*
7652  * Register device id into DDI framework.
7653  * Must be called when the driver is bound.
7654  */
7655 static int
i_ddi_devid_register(dev_info_t * dip,ddi_devid_t devid)7656 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7657 {
7658 	impl_devid_t	*i_devid = (impl_devid_t *)devid;
7659 	size_t		driver_len;
7660 	const char	*driver_name;
7661 	char		*devid_str;
7662 	major_t		major;
7663 
7664 	if ((dip == NULL) ||
7665 	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
7666 		return (DDI_FAILURE);
7667 
7668 	/* verify that the devid is valid */
7669 	if (ddi_devid_valid(devid) != DDI_SUCCESS)
7670 		return (DDI_FAILURE);
7671 
7672 	/* Updating driver name hint in devid */
7673 	driver_name = ddi_driver_name(dip);
7674 	driver_len = strlen(driver_name);
7675 	if (driver_len > DEVID_HINT_SIZE) {
7676 		/* Pick up last four characters of driver name */
7677 		driver_name += driver_len - DEVID_HINT_SIZE;
7678 		driver_len = DEVID_HINT_SIZE;
7679 	}
7680 	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
7681 	bcopy(driver_name, i_devid->did_driver, driver_len);
7682 
7683 #ifdef DEBUG
7684 	/* Corrupt the devid for testing. */
7685 	if (devid_register_corrupt)
7686 		i_devid->did_id[0] += devid_register_corrupt;
7687 	if (devid_register_corrupt_major &&
7688 	    (major == devid_register_corrupt_major))
7689 		i_devid->did_id[0] += 1;
7690 	if (devid_register_corrupt_hint)
7691 		i_devid->did_driver[0] += devid_register_corrupt_hint;
7692 	if (devid_register_corrupt_hint_major &&
7693 	    (major == devid_register_corrupt_hint_major))
7694 		i_devid->did_driver[0] += 1;
7695 #endif /* DEBUG */
7696 
7697 	/* encode the devid as a string */
7698 	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
7699 		return (DDI_FAILURE);
7700 
7701 	/* add string as a string property */
7702 	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
7703 	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
7704 		cmn_err(CE_WARN, "%s%d: devid property update failed",
7705 		    ddi_driver_name(dip), ddi_get_instance(dip));
7706 		ddi_devid_str_free(devid_str);
7707 		return (DDI_FAILURE);
7708 	}
7709 
7710 	/* keep pointer to devid string for interrupt context fma code */
7711 	if (DEVI(dip)->devi_devid_str)
7712 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7713 	DEVI(dip)->devi_devid_str = devid_str;
7714 	return (DDI_SUCCESS);
7715 }
7716 
7717 int
ddi_devid_register(dev_info_t * dip,ddi_devid_t devid)7718 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7719 {
7720 	int rval;
7721 
7722 	rval = i_ddi_devid_register(dip, devid);
7723 	if (rval == DDI_SUCCESS) {
7724 		/*
7725 		 * Register devid in devid-to-path cache
7726 		 */
7727 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7728 			mutex_enter(&DEVI(dip)->devi_lock);
7729 			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7730 			mutex_exit(&DEVI(dip)->devi_lock);
7731 		} else if (ddi_get_name_addr(dip)) {
7732 			/*
7733 			 * We only expect cache_register DDI_FAILURE when we
7734 			 * can't form the full path because of NULL devi_addr.
7735 			 */
7736 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7737 			    ddi_driver_name(dip), ddi_get_instance(dip));
7738 		}
7739 	} else {
7740 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7741 		    ddi_driver_name(dip), ddi_get_instance(dip));
7742 	}
7743 	return (rval);
7744 }
7745 
7746 /*
7747  * Remove (unregister) device id from DDI framework.
7748  * Must be called when device is detached.
7749  */
7750 static void
i_ddi_devid_unregister(dev_info_t * dip)7751 i_ddi_devid_unregister(dev_info_t *dip)
7752 {
7753 	if (DEVI(dip)->devi_devid_str) {
7754 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7755 		DEVI(dip)->devi_devid_str = NULL;
7756 	}
7757 
7758 	/* remove the devid property */
7759 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7760 }
7761 
7762 void
ddi_devid_unregister(dev_info_t * dip)7763 ddi_devid_unregister(dev_info_t *dip)
7764 {
7765 	mutex_enter(&DEVI(dip)->devi_lock);
7766 	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
7767 	mutex_exit(&DEVI(dip)->devi_lock);
7768 	e_devid_cache_unregister(dip);
7769 	i_ddi_devid_unregister(dip);
7770 }
7771 
7772 /*
7773  * Allocate and initialize a device id.
7774  */
7775 int
ddi_devid_init(dev_info_t * dip,ushort_t devid_type,ushort_t nbytes,void * id,ddi_devid_t * ret_devid)7776 ddi_devid_init(
7777 	dev_info_t	*dip,
7778 	ushort_t	devid_type,
7779 	ushort_t	nbytes,
7780 	void		*id,
7781 	ddi_devid_t	*ret_devid)
7782 {
7783 	impl_devid_t	*i_devid;
7784 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
7785 	int		driver_len;
7786 	const char	*driver_name;
7787 
7788 	switch (devid_type) {
7789 	case DEVID_SCSI3_WWN:
7790 		/*FALLTHRU*/
7791 	case DEVID_SCSI_SERIAL:
7792 		/*FALLTHRU*/
7793 	case DEVID_ATA_SERIAL:
7794 		/*FALLTHRU*/
7795 	case DEVID_NVME_NSID:
7796 		/*FALLTHRU*/
7797 	case DEVID_NVME_EUI64:
7798 		/*FALLTHRU*/
7799 	case DEVID_NVME_NGUID:
7800 		/*FALLTHRU*/
7801 	case DEVID_ENCAP:
7802 		if (nbytes == 0)
7803 			return (DDI_FAILURE);
7804 		if (id == NULL)
7805 			return (DDI_FAILURE);
7806 		break;
7807 	case DEVID_FAB:
7808 		if (nbytes != 0)
7809 			return (DDI_FAILURE);
7810 		if (id != NULL)
7811 			return (DDI_FAILURE);
7812 		nbytes = sizeof (int) +
7813 		    sizeof (struct timeval32) + sizeof (short);
7814 		sz += nbytes;
7815 		break;
7816 	default:
7817 		return (DDI_FAILURE);
7818 	}
7819 
7820 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
7821 		return (DDI_FAILURE);
7822 
7823 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
7824 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
7825 	i_devid->did_rev_hi = DEVID_REV_MSB;
7826 	i_devid->did_rev_lo = DEVID_REV_LSB;
7827 	DEVID_FORMTYPE(i_devid, devid_type);
7828 	DEVID_FORMLEN(i_devid, nbytes);
7829 
7830 	/* Fill in driver name hint */
7831 	driver_name = ddi_driver_name(dip);
7832 	driver_len = strlen(driver_name);
7833 	if (driver_len > DEVID_HINT_SIZE) {
7834 		/* Pick up last four characters of driver name */
7835 		driver_name += driver_len - DEVID_HINT_SIZE;
7836 		driver_len = DEVID_HINT_SIZE;
7837 	}
7838 
7839 	bcopy(driver_name, i_devid->did_driver, driver_len);
7840 
7841 	/* Fill in id field */
7842 	if (devid_type == DEVID_FAB) {
7843 		char		*cp;
7844 		uint32_t	hostid;
7845 		struct timeval32 timestamp32;
7846 		int		i;
7847 		int		*ip;
7848 		short		gen;
7849 
7850 		/* increase the generation number */
7851 		mutex_enter(&devid_gen_mutex);
7852 		gen = devid_gen_number++;
7853 		mutex_exit(&devid_gen_mutex);
7854 
7855 		cp = i_devid->did_id;
7856 
7857 		/* Fill in host id (big-endian byte ordering) */
7858 		hostid = zone_get_hostid(NULL);
7859 		*cp++ = hibyte(hiword(hostid));
7860 		*cp++ = lobyte(hiword(hostid));
7861 		*cp++ = hibyte(loword(hostid));
7862 		*cp++ = lobyte(loword(hostid));
7863 
7864 		/*
7865 		 * Fill in timestamp (big-endian byte ordering)
7866 		 *
7867 		 * (Note that the format may have to be changed
7868 		 * before 2038 comes around, though it's arguably
7869 		 * unique enough as it is..)
7870 		 */
7871 		uniqtime32(&timestamp32);
7872 		ip = (int *)&timestamp32;
7873 		for (i = 0;
7874 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
7875 			int	val;
7876 			val = *ip;
7877 			*cp++ = hibyte(hiword(val));
7878 			*cp++ = lobyte(hiword(val));
7879 			*cp++ = hibyte(loword(val));
7880 			*cp++ = lobyte(loword(val));
7881 		}
7882 
7883 		/* fill in the generation number */
7884 		*cp++ = hibyte(gen);
7885 		*cp++ = lobyte(gen);
7886 	} else
7887 		bcopy(id, i_devid->did_id, nbytes);
7888 
7889 	/* return device id */
7890 	*ret_devid = (ddi_devid_t)i_devid;
7891 	return (DDI_SUCCESS);
7892 }
7893 
7894 int
ddi_devid_get(dev_info_t * dip,ddi_devid_t * ret_devid)7895 ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
7896 {
7897 	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
7898 }
7899 
7900 int
i_ddi_devi_get_devid(dev_t dev,dev_info_t * dip,ddi_devid_t * ret_devid)7901 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7902 {
7903 	char		*devidstr;
7904 
7905 	ASSERT(dev != DDI_DEV_T_NONE);
7906 
7907 	/* look up the property, devt specific first */
7908 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7909 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7910 		if ((dev == DDI_DEV_T_ANY) ||
7911 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7912 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7913 		    DDI_PROP_SUCCESS)) {
7914 			return (DDI_FAILURE);
7915 		}
7916 	}
7917 
7918 	/* convert to binary form */
7919 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7920 		ddi_prop_free(devidstr);
7921 		return (DDI_FAILURE);
7922 	}
7923 	ddi_prop_free(devidstr);
7924 	return (DDI_SUCCESS);
7925 }
7926 
7927 /*
7928  * Return a copy of the device id for dev_t
7929  */
7930 int
ddi_lyr_get_devid(dev_t dev,ddi_devid_t * ret_devid)7931 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7932 {
7933 	dev_info_t	*dip;
7934 	int		rval;
7935 
7936 	/* get the dip */
7937 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7938 		return (DDI_FAILURE);
7939 
7940 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7941 
7942 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7943 	return (rval);
7944 }
7945 
7946 /*
7947  * Return a copy of the minor name for dev_t and spec_type
7948  */
7949 int
ddi_lyr_get_minor_name(dev_t dev,int spec_type,char ** minor_name)7950 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7951 {
7952 	char		*buf;
7953 	dev_info_t	*dip;
7954 	char		*nm;
7955 	int		rval;
7956 
7957 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
7958 		*minor_name = NULL;
7959 		return (DDI_FAILURE);
7960 	}
7961 
7962 	/* Find the minor name and copy into max size buf */
7963 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
7964 	ndi_devi_enter(dip);
7965 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
7966 	if (nm)
7967 		(void) strcpy(buf, nm);
7968 	ndi_devi_exit(dip);
7969 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7970 
7971 	if (nm) {
7972 		/* duplicate into min size buf for return result */
7973 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
7974 		rval = DDI_SUCCESS;
7975 	} else {
7976 		*minor_name = NULL;
7977 		rval = DDI_FAILURE;
7978 	}
7979 
7980 	/* free max size buf and return */
7981 	kmem_free(buf, MAXNAMELEN);
7982 	return (rval);
7983 }
7984 
7985 int
ddi_lyr_devid_to_devlist(ddi_devid_t devid,const char * minor_name,int * retndevs,dev_t ** retdevs)7986 ddi_lyr_devid_to_devlist(
7987 	ddi_devid_t	devid,
7988 	const char	*minor_name,
7989 	int		*retndevs,
7990 	dev_t		**retdevs)
7991 {
7992 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7993 
7994 	if (e_devid_cache_to_devt_list(devid, minor_name,
7995 	    retndevs, retdevs) == DDI_SUCCESS) {
7996 		ASSERT(*retndevs > 0);
7997 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7998 		    *retndevs, *retdevs);
7999 		return (DDI_SUCCESS);
8000 	}
8001 
8002 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8003 		return (DDI_FAILURE);
8004 	}
8005 
8006 	if (e_devid_cache_to_devt_list(devid, minor_name,
8007 	    retndevs, retdevs) == DDI_SUCCESS) {
8008 		ASSERT(*retndevs > 0);
8009 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8010 		    *retndevs, *retdevs);
8011 		return (DDI_SUCCESS);
8012 	}
8013 
8014 	return (DDI_FAILURE);
8015 }
8016 
8017 void
ddi_lyr_free_devlist(dev_t * devlist,int ndevs)8018 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
8019 {
8020 	kmem_free(devlist, sizeof (dev_t) * ndevs);
8021 }
8022 
8023 /*
8024  * Note: This will need to be fixed if we ever allow processes to
8025  * have more than one data model per exec.
8026  */
8027 model_t
ddi_mmap_get_model(void)8028 ddi_mmap_get_model(void)
8029 {
8030 	return (get_udatamodel());
8031 }
8032 
8033 model_t
ddi_model_convert_from(model_t model)8034 ddi_model_convert_from(model_t model)
8035 {
8036 	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
8037 }
8038 
8039 /*
8040  * ddi interfaces managing storage and retrieval of eventcookies.
8041  */
8042 
8043 /*
8044  * Invoke bus nexus driver's implementation of the
8045  * (*bus_remove_eventcall)() interface to remove a registered
8046  * callback handler for "event".
8047  */
8048 int
ddi_remove_event_handler(ddi_callback_id_t id)8049 ddi_remove_event_handler(ddi_callback_id_t id)
8050 {
8051 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8052 	dev_info_t *ddip;
8053 
8054 	ASSERT(cb);
8055 	if (!cb) {
8056 		return (DDI_FAILURE);
8057 	}
8058 
8059 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8060 	return (ndi_busop_remove_eventcall(ddip, id));
8061 }
8062 
8063 /*
8064  * Invoke bus nexus driver's implementation of the
8065  * (*bus_add_eventcall)() interface to register a callback handler
8066  * for "event".
8067  */
8068 int
ddi_add_event_handler(dev_info_t * dip,ddi_eventcookie_t event,void (* handler)(dev_info_t *,ddi_eventcookie_t,void *,void *),void * arg,ddi_callback_id_t * id)8069 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
8070     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
8071     void *arg, ddi_callback_id_t *id)
8072 {
8073 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
8074 }
8075 
8076 
8077 /*
8078  * Return a handle for event "name" by calling up the device tree
8079  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8080  * by a bus nexus or top of dev_info tree is reached.
8081  */
8082 int
ddi_get_eventcookie(dev_info_t * dip,char * name,ddi_eventcookie_t * event_cookiep)8083 ddi_get_eventcookie(dev_info_t *dip, char *name,
8084     ddi_eventcookie_t *event_cookiep)
8085 {
8086 	return (ndi_busop_get_eventcookie(dip, dip,
8087 	    name, event_cookiep));
8088 }
8089 
8090 /*
8091  * This procedure is provided as the general callback function when
8092  * umem_lockmemory calls as_add_callback for long term memory locking.
8093  * When as_unmap, as_setprot, or as_free encounter segments which have
8094  * locked memory, this callback will be invoked.
8095  */
8096 void
umem_lock_undo(struct as * as,void * arg,uint_t event)8097 umem_lock_undo(struct as *as, void *arg, uint_t event)
8098 {
8099 	_NOTE(ARGUNUSED(as, event))
8100 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8101 
8102 	/*
8103 	 * Call the cleanup function.  Decrement the cookie reference
8104 	 * count, if it goes to zero, return the memory for the cookie.
8105 	 * The i_ddi_umem_unlock for this cookie may or may not have been
8106 	 * called already.  It is the responsibility of the caller of
8107 	 * umem_lockmemory to handle the case of the cleanup routine
8108 	 * being called after a ddi_umem_unlock for the cookie
8109 	 * was called.
8110 	 */
8111 
8112 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8113 
8114 	/* remove the cookie if reference goes to zero */
8115 	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
8116 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
8117 	}
8118 }
8119 
8120 /*
8121  * The following two Consolidation Private routines provide generic
8122  * interfaces to increase/decrease the amount of device-locked memory.
8123  *
8124  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8125  * must be called every time i_ddi_incr_locked_memory() is called.
8126  */
8127 int
8128 /* ARGSUSED */
i_ddi_incr_locked_memory(proc_t * procp,rctl_qty_t inc)8129 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8130 {
8131 	ASSERT(procp != NULL);
8132 	mutex_enter(&procp->p_lock);
8133 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8134 		mutex_exit(&procp->p_lock);
8135 		return (ENOMEM);
8136 	}
8137 	mutex_exit(&procp->p_lock);
8138 	return (0);
8139 }
8140 
8141 /*
8142  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8143  * must be called every time i_ddi_decr_locked_memory() is called.
8144  */
8145 /* ARGSUSED */
8146 void
i_ddi_decr_locked_memory(proc_t * procp,rctl_qty_t dec)8147 i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
8148 {
8149 	ASSERT(procp != NULL);
8150 	mutex_enter(&procp->p_lock);
8151 	rctl_decr_locked_mem(procp, NULL, dec, 1);
8152 	mutex_exit(&procp->p_lock);
8153 }
8154 
8155 /*
8156  * The cookie->upd_max_lock_rctl flag is used to determine if we should
8157  * charge device locked memory to the max-locked-memory rctl.  Tracking
8158  * device locked memory causes the rctl locks to get hot under high-speed
8159  * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
8160  * we bypass charging the locked memory to the rctl altogether.  The cookie's
8161  * flag tells us if the rctl value should be updated when unlocking the memory,
8162  * in case the rctl gets changed after the memory was locked.  Any device
8163  * locked memory in that rare case will not be counted toward the rctl limit.
8164  *
8165  * When tracking the locked memory, the kproject_t parameter is always NULL
8166  * in the code paths:
8167  *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8168  *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8169  * Thus, we always use the tk_proj member to check the projp setting.
8170  */
8171 static void
init_lockedmem_rctl_flag(struct ddi_umem_cookie * cookie)8172 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8173 {
8174 	proc_t		*p;
8175 	kproject_t	*projp;
8176 	zone_t		*zonep;
8177 
8178 	ASSERT(cookie);
8179 	p = cookie->procp;
8180 	ASSERT(p);
8181 
8182 	zonep = p->p_zone;
8183 	projp = p->p_task->tk_proj;
8184 
8185 	ASSERT(zonep);
8186 	ASSERT(projp);
8187 
8188 	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8189 	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8190 		cookie->upd_max_lock_rctl = 0;
8191 	else
8192 		cookie->upd_max_lock_rctl = 1;
8193 }
8194 
8195 /*
8196  * This routine checks if the max-locked-memory resource ctl is
8197  * exceeded, if not increments it, grabs a hold on the project.
8198  * Returns 0 if successful otherwise returns error code
8199  */
8200 static int
umem_incr_devlockmem(struct ddi_umem_cookie * cookie)8201 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8202 {
8203 	proc_t		*procp;
8204 	int		ret;
8205 
8206 	ASSERT(cookie);
8207 	if (cookie->upd_max_lock_rctl == 0)
8208 		return (0);
8209 
8210 	procp = cookie->procp;
8211 	ASSERT(procp);
8212 
8213 	if ((ret = i_ddi_incr_locked_memory(procp,
8214 	    cookie->size)) != 0) {
8215 		return (ret);
8216 	}
8217 	return (0);
8218 }
8219 
8220 /*
8221  * Decrements the max-locked-memory resource ctl and releases
8222  * the hold on the project that was acquired during umem_incr_devlockmem
8223  */
8224 static void
umem_decr_devlockmem(struct ddi_umem_cookie * cookie)8225 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8226 {
8227 	proc_t		*proc;
8228 
8229 	if (cookie->upd_max_lock_rctl == 0)
8230 		return;
8231 
8232 	proc = (proc_t *)cookie->procp;
8233 	if (!proc)
8234 		return;
8235 
8236 	i_ddi_decr_locked_memory(proc, cookie->size);
8237 }
8238 
8239 /*
8240  * A consolidation private function which is essentially equivalent to
8241  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8242  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8243  * the ops_vector is valid.
8244  *
8245  * Lock the virtual address range in the current process and create a
8246  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8247  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8248  * to user space.
8249  *
8250  * Note: The resource control accounting currently uses a full charge model
8251  * in other words attempts to lock the same/overlapping areas of memory
8252  * will deduct the full size of the buffer from the projects running
8253  * counter for the device locked memory.
8254  *
8255  * addr, size should be PAGESIZE aligned
8256  *
8257  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8258  *	identifies whether the locked memory will be read or written or both
8259  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8260  * be maintained for an indefinitely long period (essentially permanent),
8261  * rather than for what would be required for a typical I/O completion.
8262  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8263  * if the memory pertains to a regular file which is mapped MAP_SHARED.
8264  * This is to prevent a deadlock if a file truncation is attempted after
8265  * after the locking is done.
8266  *
8267  * Returns 0 on success
8268  *	EINVAL - for invalid parameters
8269  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8270  *	ENOMEM - is returned if the current request to lock memory exceeds
8271  *		*.max-locked-memory resource control value.
8272  *      EFAULT - memory pertains to a regular file mapped shared and
8273  *		and DDI_UMEMLOCK_LONGTERM flag is set
8274  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8275  */
8276 int
umem_lockmemory(caddr_t addr,size_t len,int flags,ddi_umem_cookie_t * cookie,struct umem_callback_ops * ops_vector,proc_t * procp)8277 umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
8278     struct umem_callback_ops *ops_vector,
8279     proc_t *procp)
8280 {
8281 	int	error;
8282 	struct ddi_umem_cookie *p;
8283 	void	(*driver_callback)() = NULL;
8284 	struct as *as;
8285 	struct seg		*seg;
8286 	vnode_t			*vp;
8287 
8288 	/* Allow device drivers to not have to reference "curproc" */
8289 	if (procp == NULL)
8290 		procp = curproc;
8291 	as = procp->p_as;
8292 	*cookie = NULL;		/* in case of any error return */
8293 
8294 	/* These are the only three valid flags */
8295 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
8296 	    DDI_UMEMLOCK_LONGTERM)) != 0)
8297 		return (EINVAL);
8298 
8299 	/* At least one (can be both) of the two access flags must be set */
8300 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
8301 		return (EINVAL);
8302 
8303 	/* addr and len must be page-aligned */
8304 	if (((uintptr_t)addr & PAGEOFFSET) != 0)
8305 		return (EINVAL);
8306 
8307 	if ((len & PAGEOFFSET) != 0)
8308 		return (EINVAL);
8309 
8310 	/*
8311 	 * For longterm locking a driver callback must be specified; if
8312 	 * not longterm then a callback is optional.
8313 	 */
8314 	if (ops_vector != NULL) {
8315 		if (ops_vector->cbo_umem_callback_version !=
8316 		    UMEM_CALLBACK_VERSION)
8317 			return (EINVAL);
8318 		else
8319 			driver_callback = ops_vector->cbo_umem_lock_cleanup;
8320 	}
8321 	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
8322 		return (EINVAL);
8323 
8324 	/*
8325 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8326 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8327 	 */
8328 	if (ddi_umem_unlock_thread == NULL)
8329 		i_ddi_umem_unlock_thread_start();
8330 
8331 	/* Allocate memory for the cookie */
8332 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8333 
8334 	/* Convert the flags to seg_rw type */
8335 	if (flags & DDI_UMEMLOCK_WRITE) {
8336 		p->s_flags = S_WRITE;
8337 	} else {
8338 		p->s_flags = S_READ;
8339 	}
8340 
8341 	/* Store procp in cookie for later iosetup/unlock */
8342 	p->procp = (void *)procp;
8343 
8344 	/*
8345 	 * Store the struct as pointer in cookie for later use by
8346 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8347 	 * is called after relvm is called.
8348 	 */
8349 	p->asp = as;
8350 
8351 	/*
8352 	 * The size field is needed for lockmem accounting.
8353 	 */
8354 	p->size = len;
8355 	init_lockedmem_rctl_flag(p);
8356 
8357 	if (umem_incr_devlockmem(p) != 0) {
8358 		/*
8359 		 * The requested memory cannot be locked
8360 		 */
8361 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8362 		*cookie = (ddi_umem_cookie_t)NULL;
8363 		return (ENOMEM);
8364 	}
8365 
8366 	/* Lock the pages corresponding to addr, len in memory */
8367 	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
8368 	if (error != 0) {
8369 		umem_decr_devlockmem(p);
8370 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8371 		*cookie = (ddi_umem_cookie_t)NULL;
8372 		return (error);
8373 	}
8374 
8375 	/*
8376 	 * For longterm locking the addr must pertain to a seg_vn segment or
8377 	 * or a seg_spt segment.
8378 	 * If the segment pertains to a regular file, it cannot be
8379 	 * mapped MAP_SHARED.
8380 	 * This is to prevent a deadlock if a file truncation is attempted
8381 	 * after the locking is done.
8382 	 * Doing this after as_pagelock guarantees persistence of the as; if
8383 	 * an unacceptable segment is found, the cleanup includes calling
8384 	 * as_pageunlock before returning EFAULT.
8385 	 *
8386 	 * segdev is allowed here as it is already locked.  This allows
8387 	 * for memory exported by drivers through mmap() (which is already
8388 	 * locked) to be allowed for LONGTERM.
8389 	 */
8390 	if (flags & DDI_UMEMLOCK_LONGTERM) {
8391 		extern  struct seg_ops segspt_shmops;
8392 		extern	struct seg_ops segdev_ops;
8393 		AS_LOCK_ENTER(as, RW_READER);
8394 		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
8395 			if (seg == NULL || seg->s_base > addr + len)
8396 				break;
8397 			if (seg->s_ops == &segdev_ops)
8398 				continue;
8399 			if (((seg->s_ops != &segvn_ops) &&
8400 			    (seg->s_ops != &segspt_shmops)) ||
8401 			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
8402 			    vp != NULL && vp->v_type == VREG) &&
8403 			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
8404 				as_pageunlock(as, p->pparray,
8405 				    addr, len, p->s_flags);
8406 				AS_LOCK_EXIT(as);
8407 				umem_decr_devlockmem(p);
8408 				kmem_free(p, sizeof (struct ddi_umem_cookie));
8409 				*cookie = (ddi_umem_cookie_t)NULL;
8410 				return (EFAULT);
8411 			}
8412 		}
8413 		AS_LOCK_EXIT(as);
8414 	}
8415 
8416 
8417 	/* Initialize the fields in the ddi_umem_cookie */
8418 	p->cvaddr = addr;
8419 	p->type = UMEM_LOCKED;
8420 	if (driver_callback != NULL) {
8421 		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
8422 		p->cook_refcnt = 2;
8423 		p->callbacks = *ops_vector;
8424 	} else {
8425 		/* only i_ddi_umme_unlock needs the cookie */
8426 		p->cook_refcnt = 1;
8427 	}
8428 
8429 	*cookie = (ddi_umem_cookie_t)p;
8430 
8431 	/*
8432 	 * If a driver callback was specified, add an entry to the
8433 	 * as struct callback list. The as_pagelock above guarantees
8434 	 * the persistence of as.
8435 	 */
8436 	if (driver_callback) {
8437 		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
8438 		    addr, len, KM_SLEEP);
8439 		if (error != 0) {
8440 			as_pageunlock(as, p->pparray,
8441 			    addr, len, p->s_flags);
8442 			umem_decr_devlockmem(p);
8443 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8444 			*cookie = (ddi_umem_cookie_t)NULL;
8445 		}
8446 	}
8447 	return (error);
8448 }
8449 
8450 /*
8451  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8452  * the cookie.  Called from i_ddi_umem_unlock_thread.
8453  */
8454 
8455 static void
i_ddi_umem_unlock(struct ddi_umem_cookie * p)8456 i_ddi_umem_unlock(struct ddi_umem_cookie *p)
8457 {
8458 	uint_t	rc;
8459 
8460 	/*
8461 	 * There is no way to determine whether a callback to
8462 	 * umem_lock_undo was registered via as_add_callback.
8463 	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8464 	 * a valid callback function structure.)  as_delete_callback
8465 	 * is called to delete a possible registered callback.  If the
8466 	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8467 	 * indicates that there was a callback registered, and that is was
8468 	 * successfully deleted.  Thus, the cookie reference count
8469 	 * will never be decremented by umem_lock_undo.  Just return the
8470 	 * memory for the cookie, since both users of the cookie are done.
8471 	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8472 	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
8473 	 * indicates that callback processing is taking place and, and
8474 	 * umem_lock_undo is, or will be, executing, and thus decrementing
8475 	 * the cookie reference count when it is complete.
8476 	 *
8477 	 * This needs to be done before as_pageunlock so that the
8478 	 * persistence of as is guaranteed because of the locked pages.
8479 	 *
8480 	 */
8481 	rc = as_delete_callback(p->asp, p);
8482 
8483 
8484 	/*
8485 	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8486 	 * after relvm is called so use p->asp.
8487 	 */
8488 	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);
8489 
8490 	/*
8491 	 * Now that we have unlocked the memory decrement the
8492 	 * *.max-locked-memory rctl
8493 	 */
8494 	umem_decr_devlockmem(p);
8495 
8496 	if (rc == AS_CALLBACK_DELETED) {
8497 		/* umem_lock_undo will not happen, return the cookie memory */
8498 		ASSERT(p->cook_refcnt == 2);
8499 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8500 	} else {
8501 		/*
8502 		 * umem_undo_lock may happen if as_delete_callback returned
8503 		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
8504 		 * reference count, atomically, and return the cookie
8505 		 * memory if the reference count goes to zero.  The only
8506 		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
8507 		 * case, just return the cookie memory.
8508 		 */
8509 		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8510 		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
8511 		    == 0)) {
8512 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8513 		}
8514 	}
8515 }
8516 
8517 /*
8518  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8519  *
8520  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8521  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8522  * via calls to ddi_umem_unlock.
8523  */
8524 
8525 static void
i_ddi_umem_unlock_thread(void)8526 i_ddi_umem_unlock_thread(void)
8527 {
8528 	struct ddi_umem_cookie	*ret_cookie;
8529 	callb_cpr_t	cprinfo;
8530 
8531 	/* process the ddi_umem_unlock list */
8532 	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8533 	    callb_generic_cpr, "unlock_thread");
8534 	for (;;) {
8535 		mutex_enter(&ddi_umem_unlock_mutex);
8536 		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
8537 			ret_cookie = ddi_umem_unlock_head;
8538 			/* take if off the list */
8539 			if ((ddi_umem_unlock_head =
8540 			    ddi_umem_unlock_head->unl_forw) == NULL) {
8541 				ddi_umem_unlock_tail = NULL;
8542 			}
8543 			mutex_exit(&ddi_umem_unlock_mutex);
8544 			/* unlock the pages in this cookie */
8545 			(void) i_ddi_umem_unlock(ret_cookie);
8546 		} else {   /* list is empty, wait for next ddi_umem_unlock */
8547 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
8548 			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8549 			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8550 			mutex_exit(&ddi_umem_unlock_mutex);
8551 		}
8552 	}
8553 	/* ddi_umem_unlock_thread does not exit */
8554 	/* NOTREACHED */
8555 }
8556 
8557 /*
8558  * Start the thread that will process the ddi_umem_unlock list if it is
8559  * not already started (i_ddi_umem_unlock_thread).
8560  */
8561 static void
i_ddi_umem_unlock_thread_start(void)8562 i_ddi_umem_unlock_thread_start(void)
8563 {
8564 	mutex_enter(&ddi_umem_unlock_mutex);
8565 	if (ddi_umem_unlock_thread == NULL) {
8566 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8567 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8568 		    TS_RUN, minclsyspri);
8569 	}
8570 	mutex_exit(&ddi_umem_unlock_mutex);
8571 }
8572 
8573 /*
8574  * Lock the virtual address range in the current process and create a
8575  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8576  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8577  * to user space.
8578  *
8579  * Note: The resource control accounting currently uses a full charge model
8580  * in other words attempts to lock the same/overlapping areas of memory
8581  * will deduct the full size of the buffer from the projects running
8582  * counter for the device locked memory. This applies to umem_lockmemory too.
8583  *
8584  * addr, size should be PAGESIZE aligned
8585  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8586  *	identifies whether the locked memory will be read or written or both
8587  *
8588  * Returns 0 on success
8589  *	EINVAL - for invalid parameters
8590  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8591  *	ENOMEM - is returned if the current request to lock memory exceeds
8592  *		*.max-locked-memory resource control value.
8593  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8594  */
8595 int
ddi_umem_lock(caddr_t addr,size_t len,int flags,ddi_umem_cookie_t * cookie)8596 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8597 {
8598 	int	error;
8599 	struct ddi_umem_cookie *p;
8600 
8601 	*cookie = NULL;		/* in case of any error return */
8602 
8603 	/* These are the only two valid flags */
8604 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8605 		return (EINVAL);
8606 	}
8607 
8608 	/* At least one of the two flags (or both) must be set */
8609 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8610 		return (EINVAL);
8611 	}
8612 
8613 	/* addr and len must be page-aligned */
8614 	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8615 		return (EINVAL);
8616 	}
8617 
8618 	if ((len & PAGEOFFSET) != 0) {
8619 		return (EINVAL);
8620 	}
8621 
8622 	/*
8623 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8624 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8625 	 */
8626 	if (ddi_umem_unlock_thread == NULL)
8627 		i_ddi_umem_unlock_thread_start();
8628 
8629 	/* Allocate memory for the cookie */
8630 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8631 
8632 	/* Convert the flags to seg_rw type */
8633 	if (flags & DDI_UMEMLOCK_WRITE) {
8634 		p->s_flags = S_WRITE;
8635 	} else {
8636 		p->s_flags = S_READ;
8637 	}
8638 
8639 	/* Store curproc in cookie for later iosetup/unlock */
8640 	p->procp = (void *)curproc;
8641 
8642 	/*
8643 	 * Store the struct as pointer in cookie for later use by
8644 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8645 	 * is called after relvm is called.
8646 	 */
8647 	p->asp = curproc->p_as;
8648 	/*
8649 	 * The size field is needed for lockmem accounting.
8650 	 */
8651 	p->size = len;
8652 	init_lockedmem_rctl_flag(p);
8653 
8654 	if (umem_incr_devlockmem(p) != 0) {
8655 		/*
8656 		 * The requested memory cannot be locked
8657 		 */
8658 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8659 		*cookie = (ddi_umem_cookie_t)NULL;
8660 		return (ENOMEM);
8661 	}
8662 
8663 	/* Lock the pages corresponding to addr, len in memory */
8664 	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8665 	    addr, len, p->s_flags);
8666 	if (error != 0) {
8667 		umem_decr_devlockmem(p);
8668 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8669 		*cookie = (ddi_umem_cookie_t)NULL;
8670 		return (error);
8671 	}
8672 
8673 	/* Initialize the fields in the ddi_umem_cookie */
8674 	p->cvaddr = addr;
8675 	p->type = UMEM_LOCKED;
8676 	p->cook_refcnt = 1;
8677 
8678 	*cookie = (ddi_umem_cookie_t)p;
8679 	return (error);
8680 }
8681 
8682 /*
8683  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8684  * unlocked by i_ddi_umem_unlock_thread.
8685  */
8686 
8687 void
ddi_umem_unlock(ddi_umem_cookie_t cookie)8688 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8689 {
8690 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8691 
8692 	ASSERT(p->type == UMEM_LOCKED);
8693 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8694 	ASSERT(ddi_umem_unlock_thread != NULL);
8695 
8696 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8697 	/*
8698 	 * Queue the unlock request and notify i_ddi_umem_unlock thread
8699 	 * if it's called in the interrupt context. Otherwise, unlock pages
8700 	 * immediately.
8701 	 */
8702 	if (servicing_interrupt()) {
8703 		/* queue the unlock request and notify the thread */
8704 		mutex_enter(&ddi_umem_unlock_mutex);
8705 		if (ddi_umem_unlock_head == NULL) {
8706 			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8707 			cv_broadcast(&ddi_umem_unlock_cv);
8708 		} else {
8709 			ddi_umem_unlock_tail->unl_forw = p;
8710 			ddi_umem_unlock_tail = p;
8711 		}
8712 		mutex_exit(&ddi_umem_unlock_mutex);
8713 	} else {
8714 		/* unlock the pages right away */
8715 		(void) i_ddi_umem_unlock(p);
8716 	}
8717 }
8718 
8719 /*
8720  * Create a buf structure from a ddi_umem_cookie
8721  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8722  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8723  * off, len - identifies the portion of the memory represented by the cookie
8724  *		that the buf points to.
8725  *	NOTE: off, len need to follow the alignment/size restrictions of the
8726  *		device (dev) that this buf will be passed to. Some devices
8727  *		will accept unrestricted alignment/size, whereas others (such as
8728  *		st) require some block-size alignment/size. It is the caller's
8729  *		responsibility to ensure that the alignment/size restrictions
8730  *		are met (we cannot assert as we do not know the restrictions)
8731  *
8732  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8733  *		the flags used in ddi_umem_lock
8734  *
8735  * The following three arguments are used to initialize fields in the
8736  * buf structure and are uninterpreted by this routine.
8737  *
8738  * dev
8739  * blkno
8740  * iodone
8741  *
8742  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8743  *
8744  * Returns a buf structure pointer on success (to be freed by freerbuf)
8745  *	NULL on any parameter error or memory alloc failure
8746  *
8747  */
8748 struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie,off_t off,size_t len,int direction,dev_t dev,daddr_t blkno,int (* iodone)(struct buf *),int sleepflag)8749 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8750     int direction, dev_t dev, daddr_t blkno,
8751     int (*iodone)(struct buf *), int sleepflag)
8752 {
8753 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8754 	struct buf *bp;
8755 
8756 	/*
8757 	 * check for valid cookie offset, len
8758 	 */
8759 	if ((off + len) > p->size) {
8760 		return (NULL);
8761 	}
8762 
8763 	if (len > p->size) {
8764 		return (NULL);
8765 	}
8766 
8767 	/* direction has to be one of B_READ or B_WRITE */
8768 	if ((direction != B_READ) && (direction != B_WRITE)) {
8769 		return (NULL);
8770 	}
8771 
8772 	/* These are the only two valid sleepflags */
8773 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8774 		return (NULL);
8775 	}
8776 
8777 	/*
8778 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8779 	 */
8780 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8781 		return (NULL);
8782 	}
8783 
8784 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8785 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8786 	    (p->procp == NULL) : (p->procp != NULL));
8787 
8788 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8789 	if (bp == NULL) {
8790 		return (NULL);
8791 	}
8792 	bioinit(bp);
8793 
8794 	bp->b_flags = B_BUSY | B_PHYS | direction;
8795 	bp->b_edev = dev;
8796 	bp->b_lblkno = blkno;
8797 	bp->b_iodone = iodone;
8798 	bp->b_bcount = len;
8799 	bp->b_proc = (proc_t *)p->procp;
8800 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8801 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8802 	if (p->pparray != NULL) {
8803 		bp->b_flags |= B_SHADOW;
8804 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8805 		bp->b_shadow = p->pparray + btop(off);
8806 	}
8807 	return (bp);
8808 }
8809 
8810 /*
8811  * Fault-handling and related routines
8812  */
8813 
8814 ddi_devstate_t
ddi_get_devstate(dev_info_t * dip)8815 ddi_get_devstate(dev_info_t *dip)
8816 {
8817 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8818 		return (DDI_DEVSTATE_OFFLINE);
8819 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8820 		return (DDI_DEVSTATE_DOWN);
8821 	else if (DEVI_IS_BUS_QUIESCED(dip))
8822 		return (DDI_DEVSTATE_QUIESCED);
8823 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8824 		return (DDI_DEVSTATE_DEGRADED);
8825 	else
8826 		return (DDI_DEVSTATE_UP);
8827 }
8828 
8829 void
ddi_dev_report_fault(dev_info_t * dip,ddi_fault_impact_t impact,ddi_fault_location_t location,const char * message)8830 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8831     ddi_fault_location_t location, const char *message)
8832 {
8833 	struct ddi_fault_event_data fd;
8834 	ddi_eventcookie_t ec;
8835 
8836 	/*
8837 	 * Assemble all the information into a fault-event-data structure
8838 	 */
8839 	fd.f_dip = dip;
8840 	fd.f_impact = impact;
8841 	fd.f_location = location;
8842 	fd.f_message = message;
8843 	fd.f_oldstate = ddi_get_devstate(dip);
8844 
8845 	/*
8846 	 * Get eventcookie from defining parent.
8847 	 */
8848 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8849 	    DDI_SUCCESS)
8850 		return;
8851 
8852 	(void) ndi_post_event(dip, dip, ec, &fd);
8853 }
8854 
8855 char *
i_ddi_devi_class(dev_info_t * dip)8856 i_ddi_devi_class(dev_info_t *dip)
8857 {
8858 	return (DEVI(dip)->devi_device_class);
8859 }
8860 
8861 int
i_ddi_set_devi_class(dev_info_t * dip,const char * devi_class,int flag)8862 i_ddi_set_devi_class(dev_info_t *dip, const char *devi_class, int flag)
8863 {
8864 	struct dev_info *devi = DEVI(dip);
8865 
8866 	mutex_enter(&devi->devi_lock);
8867 
8868 	if (devi->devi_device_class)
8869 		kmem_free(devi->devi_device_class,
8870 		    strlen(devi->devi_device_class) + 1);
8871 
8872 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8873 	    != NULL) {
8874 		mutex_exit(&devi->devi_lock);
8875 		return (DDI_SUCCESS);
8876 	}
8877 
8878 	mutex_exit(&devi->devi_lock);
8879 
8880 	return (DDI_FAILURE);
8881 }
8882 
8883 
8884 /*
8885  * Task Queues DDI interfaces.
8886  */
8887 
8888 /* ARGSUSED */
8889 ddi_taskq_t *
ddi_taskq_create(dev_info_t * dip,const char * name,int nthreads,pri_t pri,uint_t cflags)8890 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8891     pri_t pri, uint_t cflags)
8892 {
8893 	char full_name[TASKQ_NAMELEN];
8894 	const char *tq_name;
8895 	int nodeid = 0;
8896 
8897 	if (dip == NULL)
8898 		tq_name = name;
8899 	else {
8900 		nodeid = ddi_get_instance(dip);
8901 
8902 		if (name == NULL)
8903 			name = "tq";
8904 
8905 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8906 		    ddi_driver_name(dip), name);
8907 
8908 		tq_name = full_name;
8909 	}
8910 
8911 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8912 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8913 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8914 }
8915 
8916 void
ddi_taskq_destroy(ddi_taskq_t * tq)8917 ddi_taskq_destroy(ddi_taskq_t *tq)
8918 {
8919 	taskq_destroy((taskq_t *)tq);
8920 }
8921 
8922 int
ddi_taskq_dispatch(ddi_taskq_t * tq,void (* func)(void *),void * arg,uint_t dflags)8923 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8924     void *arg, uint_t dflags)
8925 {
8926 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8927 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8928 
8929 	return (id != TASKQID_INVALID ? DDI_SUCCESS : DDI_FAILURE);
8930 }
8931 
8932 void
ddi_taskq_wait(ddi_taskq_t * tq)8933 ddi_taskq_wait(ddi_taskq_t *tq)
8934 {
8935 	taskq_wait((taskq_t *)tq);
8936 }
8937 
8938 void
ddi_taskq_suspend(ddi_taskq_t * tq)8939 ddi_taskq_suspend(ddi_taskq_t *tq)
8940 {
8941 	taskq_suspend((taskq_t *)tq);
8942 }
8943 
8944 boolean_t
ddi_taskq_suspended(ddi_taskq_t * tq)8945 ddi_taskq_suspended(ddi_taskq_t *tq)
8946 {
8947 	return (taskq_suspended((taskq_t *)tq));
8948 }
8949 
8950 void
ddi_taskq_resume(ddi_taskq_t * tq)8951 ddi_taskq_resume(ddi_taskq_t *tq)
8952 {
8953 	taskq_resume((taskq_t *)tq);
8954 }
8955 
8956 int
ddi_parse(const char * ifname,char * alnum,uint_t * nump)8957 ddi_parse(const char *ifname, char *alnum, uint_t *nump)
8958 {
8959 	/*
8960 	 * Cap "alnum" size at LIFNAMSIZ, as callers use that in most/all
8961 	 * cases.
8962 	 */
8963 	return (ddi_parse_dlen(ifname, alnum, LIFNAMSIZ, nump));
8964 }
8965 
8966 int
ddi_parse_dlen(const char * ifname,char * alnum,size_t alnumsize,uint_t * nump)8967 ddi_parse_dlen(const char *ifname, char *alnum, size_t alnumsize, uint_t *nump)
8968 {
8969 	const char	*p;
8970 	int		copy_len;
8971 	ulong_t		num;
8972 	boolean_t	nonum = B_TRUE;
8973 	char		c;
8974 
8975 	copy_len = strlen(ifname);
8976 	for (p = ifname + copy_len; p != ifname; copy_len--) {
8977 		c = *--p;
8978 		if (!isdigit(c)) {
8979 			/*
8980 			 * At this point, copy_len is the length of ifname
8981 			 * WITHOUT the PPA number. For "e1000g10" copy_len is 6.
8982 			 *
8983 			 * We must first make sure we HAVE a PPA, and we
8984 			 * aren't exceeding alnumsize with copy_len and a '\0'
8985 			 * terminator...
8986 			 */
8987 			int copy_len_nul = copy_len + 1;
8988 
8989 			if (nonum || alnumsize < copy_len_nul)
8990 				return (DDI_FAILURE);
8991 
8992 			/*
8993 			 * ... then we abuse strlcpy() to copy over the
8994 			 * driver name portion AND '\0'-terminate it.
8995 			 */
8996 			(void) strlcpy(alnum, ifname, copy_len_nul);
8997 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
8998 				return (DDI_FAILURE);
8999 			break;
9000 		}
9001 		nonum = B_FALSE;
9002 	}
9003 
9004 	if (copy_len == 0)
9005 		return (DDI_FAILURE);
9006 
9007 	*nump = num;
9008 	return (DDI_SUCCESS);
9009 }
9010 
9011 /*
9012  * Default initialization function for drivers that don't need to quiesce.
9013  */
9014 /* ARGSUSED */
9015 int
ddi_quiesce_not_needed(dev_info_t * dip)9016 ddi_quiesce_not_needed(dev_info_t *dip)
9017 {
9018 	return (DDI_SUCCESS);
9019 }
9020 
9021 /*
9022  * Initialization function for drivers that should implement quiesce()
9023  * but haven't yet.
9024  */
9025 /* ARGSUSED */
9026 int
ddi_quiesce_not_supported(dev_info_t * dip)9027 ddi_quiesce_not_supported(dev_info_t *dip)
9028 {
9029 	return (DDI_FAILURE);
9030 }
9031 
9032 char *
ddi_strdup(const char * str,int flag)9033 ddi_strdup(const char *str, int flag)
9034 {
9035 	int	n;
9036 	char	*ptr;
9037 
9038 	ASSERT(str != NULL);
9039 	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9040 
9041 	n = strlen(str);
9042 	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9043 		return (NULL);
9044 	bcopy(str, ptr, n + 1);
9045 	return (ptr);
9046 }
9047 
9048 char *
strdup(const char * str)9049 strdup(const char *str)
9050 {
9051 	return (ddi_strdup(str, KM_SLEEP));
9052 }
9053 
9054 void
strfree(char * str)9055 strfree(char *str)
9056 {
9057 	ASSERT(str != NULL);
9058 	kmem_free(str, strlen(str) + 1);
9059 }
9060 
9061 /*
9062  * Generic DDI callback interfaces.
9063  */
9064 
9065 int
ddi_cb_register(dev_info_t * dip,ddi_cb_flags_t flags,ddi_cb_func_t cbfunc,void * arg1,void * arg2,ddi_cb_handle_t * ret_hdlp)9066 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
9067     void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
9068 {
9069 	ddi_cb_t	*cbp;
9070 
9071 	ASSERT(dip != NULL);
9072 	ASSERT(DDI_CB_FLAG_VALID(flags));
9073 	ASSERT(cbfunc != NULL);
9074 	ASSERT(ret_hdlp != NULL);
9075 
9076 	/* Sanity check the context */
9077 	ASSERT(!servicing_interrupt());
9078 	if (servicing_interrupt())
9079 		return (DDI_FAILURE);
9080 
9081 	/* Validate parameters */
9082 	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
9083 	    (cbfunc == NULL) || (ret_hdlp == NULL))
9084 		return (DDI_EINVAL);
9085 
9086 	/* Check for previous registration */
9087 	if (DEVI(dip)->devi_cb_p != NULL)
9088 		return (DDI_EALREADY);
9089 
9090 	/* Allocate and initialize callback */
9091 	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
9092 	cbp->cb_dip = dip;
9093 	cbp->cb_func = cbfunc;
9094 	cbp->cb_arg1 = arg1;
9095 	cbp->cb_arg2 = arg2;
9096 	cbp->cb_flags = flags;
9097 	DEVI(dip)->devi_cb_p = cbp;
9098 
9099 	/* If adding an IRM callback, notify IRM */
9100 	if (flags & DDI_CB_FLAG_INTR)
9101 		i_ddi_irm_set_cb(dip, B_TRUE);
9102 
9103 	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9104 	return (DDI_SUCCESS);
9105 }
9106 
9107 int
ddi_cb_unregister(ddi_cb_handle_t hdl)9108 ddi_cb_unregister(ddi_cb_handle_t hdl)
9109 {
9110 	ddi_cb_t	*cbp;
9111 	dev_info_t	*dip;
9112 
9113 	ASSERT(hdl != NULL);
9114 
9115 	/* Sanity check the context */
9116 	ASSERT(!servicing_interrupt());
9117 	if (servicing_interrupt())
9118 		return (DDI_FAILURE);
9119 
9120 	/* Validate parameters */
9121 	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9122 	    ((dip = cbp->cb_dip) == NULL))
9123 		return (DDI_EINVAL);
9124 
9125 	/* If removing an IRM callback, notify IRM */
9126 	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9127 		i_ddi_irm_set_cb(dip, B_FALSE);
9128 
9129 	/* Destroy the callback */
9130 	kmem_free(cbp, sizeof (ddi_cb_t));
9131 	DEVI(dip)->devi_cb_p = NULL;
9132 
9133 	return (DDI_SUCCESS);
9134 }
9135 
9136 /*
9137  * Platform independent DR routines
9138  */
9139 
9140 static int
ndi2errno(int n)9141 ndi2errno(int n)
9142 {
9143 	int err = 0;
9144 
9145 	switch (n) {
9146 		case NDI_NOMEM:
9147 			err = ENOMEM;
9148 			break;
9149 		case NDI_BUSY:
9150 			err = EBUSY;
9151 			break;
9152 		case NDI_FAULT:
9153 			err = EFAULT;
9154 			break;
9155 		case NDI_FAILURE:
9156 			err = EIO;
9157 			break;
9158 		case NDI_SUCCESS:
9159 			break;
9160 		case NDI_BADHANDLE:
9161 		default:
9162 			err = EINVAL;
9163 			break;
9164 	}
9165 	return (err);
9166 }
9167 
9168 /*
9169  * Prom tree node list
9170  */
9171 struct ptnode {
9172 	pnode_t		nodeid;
9173 	struct ptnode	*next;
9174 };
9175 
9176 /*
9177  * Prom tree walk arg
9178  */
9179 struct pta {
9180 	dev_info_t	*pdip;
9181 	devi_branch_t	*bp;
9182 	uint_t		flags;
9183 	dev_info_t	*fdip;
9184 	struct ptnode	*head;
9185 };
9186 
9187 static void
visit_node(pnode_t nodeid,struct pta * ap)9188 visit_node(pnode_t nodeid, struct pta *ap)
9189 {
9190 	struct ptnode	**nextp;
9191 	int		(*select)(pnode_t, void *, uint_t);
9192 
9193 	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);
9194 
9195 	select = ap->bp->create.prom_branch_select;
9196 
9197 	ASSERT(select);
9198 
9199 	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {
9200 
9201 		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
9202 			;
9203 
9204 		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);
9205 
9206 		(*nextp)->nodeid = nodeid;
9207 	}
9208 
9209 	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
9210 		return;
9211 
9212 	nodeid = prom_childnode(nodeid);
9213 	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9214 		visit_node(nodeid, ap);
9215 		nodeid = prom_nextnode(nodeid);
9216 	}
9217 }
9218 
9219 /*
9220  * NOTE: The caller of this function must check for device contracts
9221  * or LDI callbacks against this dip before setting the dip offline.
9222  */
9223 static int
set_infant_dip_offline(dev_info_t * dip,void * arg)9224 set_infant_dip_offline(dev_info_t *dip, void *arg)
9225 {
9226 	char	*path = (char *)arg;
9227 
9228 	ASSERT(dip);
9229 	ASSERT(arg);
9230 
9231 	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
9232 		(void) ddi_pathname(dip, path);
9233 		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
9234 		    "node: %s", path);
9235 		return (DDI_FAILURE);
9236 	}
9237 
9238 	mutex_enter(&(DEVI(dip)->devi_lock));
9239 	if (!DEVI_IS_DEVICE_OFFLINE(dip))
9240 		DEVI_SET_DEVICE_OFFLINE(dip);
9241 	mutex_exit(&(DEVI(dip)->devi_lock));
9242 
9243 	return (DDI_SUCCESS);
9244 }
9245 
9246 typedef struct result {
9247 	char	*path;
9248 	int	result;
9249 } result_t;
9250 
9251 static int
dip_set_offline(dev_info_t * dip,void * arg)9252 dip_set_offline(dev_info_t *dip, void *arg)
9253 {
9254 	int end;
9255 	result_t *resp = (result_t *)arg;
9256 
9257 	ASSERT(dip);
9258 	ASSERT(resp);
9259 
9260 	/*
9261 	 * We stop the walk if e_ddi_offline_notify() returns
9262 	 * failure, because this implies that one or more consumers
9263 	 * (either LDI or contract based) has blocked the offline.
9264 	 * So there is no point in conitnuing the walk
9265 	 */
9266 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9267 		resp->result = DDI_FAILURE;
9268 		return (DDI_WALK_TERMINATE);
9269 	}
9270 
9271 	/*
9272 	 * If set_infant_dip_offline() returns failure, it implies
9273 	 * that we failed to set a particular dip offline. This
9274 	 * does not imply that the offline as a whole should fail.
9275 	 * We want to do the best we can, so we continue the walk.
9276 	 */
9277 	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9278 		end = DDI_SUCCESS;
9279 	else
9280 		end = DDI_FAILURE;
9281 
9282 	e_ddi_offline_finalize(dip, end);
9283 
9284 	return (DDI_WALK_CONTINUE);
9285 }
9286 
9287 /*
9288  * The call to e_ddi_offline_notify() exists for the
9289  * unlikely error case that a branch we are trying to
9290  * create already exists and has device contracts or LDI
9291  * event callbacks against it.
9292  *
9293  * We allow create to succeed for such branches only if
9294  * no constraints block the offline.
9295  */
9296 static int
branch_set_offline(dev_info_t * dip,char * path)9297 branch_set_offline(dev_info_t *dip, char *path)
9298 {
9299 	int		end;
9300 	result_t	res;
9301 
9302 
9303 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9304 		return (DDI_FAILURE);
9305 	}
9306 
9307 	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9308 		end = DDI_SUCCESS;
9309 	else
9310 		end = DDI_FAILURE;
9311 
9312 	e_ddi_offline_finalize(dip, end);
9313 
9314 	if (end == DDI_FAILURE)
9315 		return (DDI_FAILURE);
9316 
9317 	res.result = DDI_SUCCESS;
9318 	res.path = path;
9319 
9320 	ndi_devi_enter(dip);
9321 	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9322 	ndi_devi_exit(dip);
9323 
9324 	return (res.result);
9325 }
9326 
9327 /*ARGSUSED*/
9328 static int
create_prom_branch(void * arg,int has_changed)9329 create_prom_branch(void *arg, int has_changed)
9330 {
9331 	int		exists, rv;
9332 	pnode_t		nodeid;
9333 	struct ptnode	*tnp;
9334 	dev_info_t	*dip;
9335 	struct pta	*ap = arg;
9336 	devi_branch_t	*bp;
9337 	char		*path;
9338 
9339 	ASSERT(ap);
9340 	ASSERT(ap->fdip == NULL);
9341 	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));
9342 
9343 	bp = ap->bp;
9344 
9345 	nodeid = ddi_get_nodeid(ap->pdip);
9346 	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
9347 		cmn_err(CE_WARN, "create_prom_branch: invalid "
9348 		    "nodeid: 0x%x", nodeid);
9349 		return (EINVAL);
9350 	}
9351 
9352 	ap->head = NULL;
9353 
9354 	nodeid = prom_childnode(nodeid);
9355 	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9356 		visit_node(nodeid, ap);
9357 		nodeid = prom_nextnode(nodeid);
9358 	}
9359 
9360 	if (ap->head == NULL)
9361 		return (ENODEV);
9362 
9363 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9364 	rv = 0;
9365 	while ((tnp = ap->head) != NULL) {
9366 		ap->head = tnp->next;
9367 
9368 		ndi_devi_enter(ap->pdip);
9369 
9370 		/*
9371 		 * Check if the branch already exists.
9372 		 */
9373 		exists = 0;
9374 		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
9375 		if (dip != NULL) {
9376 			exists = 1;
9377 
9378 			/* Parent is held busy, so release hold */
9379 			ndi_rele_devi(dip);
9380 #ifdef	DEBUG
9381 			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
9382 			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
9383 #endif
9384 		} else {
9385 			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
9386 		}
9387 
9388 		kmem_free(tnp, sizeof (struct ptnode));
9389 
9390 		/*
9391 		 * Hold the branch if it is not already held
9392 		 */
9393 		if (dip && !exists) {
9394 			e_ddi_branch_hold(dip);
9395 		}
9396 
9397 		ASSERT(dip == NULL || e_ddi_branch_held(dip));
9398 
9399 		/*
9400 		 * Set all dips in the newly created branch offline so that
9401 		 * only a "configure" operation can attach
9402 		 * the branch
9403 		 */
9404 		if (dip == NULL || branch_set_offline(dip, path)
9405 		    == DDI_FAILURE) {
9406 			ndi_devi_exit(ap->pdip);
9407 			rv = EIO;
9408 			continue;
9409 		}
9410 
9411 		ASSERT(ddi_get_parent(dip) == ap->pdip);
9412 
9413 		ndi_devi_exit(ap->pdip);
9414 
9415 		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
9416 			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
9417 			if (error && rv == 0)
9418 				rv = error;
9419 		}
9420 
9421 		/*
9422 		 * Invoke devi_branch_callback() (if it exists) only for
9423 		 * newly created branches
9424 		 */
9425 		if (bp->devi_branch_callback && !exists)
9426 			bp->devi_branch_callback(dip, bp->arg, 0);
9427 	}
9428 
9429 	kmem_free(path, MAXPATHLEN);
9430 
9431 	return (rv);
9432 }
9433 
9434 static int
sid_node_create(dev_info_t * pdip,devi_branch_t * bp,dev_info_t ** rdipp)9435 sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
9436 {
9437 	int			rv, len;
9438 	int			i, flags, ret;
9439 	dev_info_t		*dip;
9440 	char			*nbuf;
9441 	char			*path;
9442 	static const char	*noname = "<none>";
9443 
9444 	ASSERT(pdip);
9445 	ASSERT(DEVI_BUSY_OWNED(pdip));
9446 
9447 	flags = 0;
9448 
9449 	/*
9450 	 * Creating the root of a branch ?
9451 	 */
9452 	if (rdipp) {
9453 		*rdipp = NULL;
9454 		flags = DEVI_BRANCH_ROOT;
9455 	}
9456 
9457 	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
9458 	rv = bp->create.sid_branch_create(dip, bp->arg, flags);
9459 
9460 	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);
9461 
9462 	if (rv == DDI_WALK_ERROR) {
9463 		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
9464 		    " properties on devinfo node %p",  (void *)dip);
9465 		goto fail;
9466 	}
9467 
9468 	len = OBP_MAXDRVNAME;
9469 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
9470 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
9471 	    != DDI_PROP_SUCCESS) {
9472 		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
9473 		    "no name property", (void *)dip);
9474 		goto fail;
9475 	}
9476 
9477 	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
9478 	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
9479 		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
9480 		    " for devinfo node %p", nbuf, (void *)dip);
9481 		goto fail;
9482 	}
9483 
9484 	kmem_free(nbuf, OBP_MAXDRVNAME);
9485 
9486 	/*
9487 	 * Ignore bind failures just like boot does
9488 	 */
9489 	(void) ndi_devi_bind_driver(dip, 0);
9490 
9491 	switch (rv) {
9492 	case DDI_WALK_CONTINUE:
9493 	case DDI_WALK_PRUNESIB:
9494 		ndi_devi_enter(dip);
9495 
9496 		i = DDI_WALK_CONTINUE;
9497 		for (; i == DDI_WALK_CONTINUE; ) {
9498 			i = sid_node_create(dip, bp, NULL);
9499 		}
9500 
9501 		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
9502 		if (i == DDI_WALK_ERROR)
9503 			rv = i;
9504 		/*
9505 		 * If PRUNESIB stop creating siblings
9506 		 * of dip's child. Subsequent walk behavior
9507 		 * is determined by rv returned by dip.
9508 		 */
9509 
9510 		ndi_devi_exit(dip);
9511 		break;
9512 	case DDI_WALK_TERMINATE:
9513 		/*
9514 		 * Don't create children and ask our parent
9515 		 * to not create siblings either.
9516 		 */
9517 		rv = DDI_WALK_PRUNESIB;
9518 		break;
9519 	case DDI_WALK_PRUNECHILD:
9520 		/*
9521 		 * Don't create children, but ask parent to continue
9522 		 * with siblings.
9523 		 */
9524 		rv = DDI_WALK_CONTINUE;
9525 		break;
9526 	default:
9527 		ASSERT(0);
9528 		break;
9529 	}
9530 
9531 	if (rdipp)
9532 		*rdipp = dip;
9533 
9534 	/*
9535 	 * Set device offline - only the "configure" op should cause an attach.
9536 	 * Note that it is safe to set the dip offline without checking
9537 	 * for either device contract or layered driver (LDI) based constraints
9538 	 * since there cannot be any contracts or LDI opens of this device.
9539 	 * This is because this node is a newly created dip with the parent busy
9540 	 * held, so no other thread can come in and attach this dip. A dip that
9541 	 * has never been attached cannot have contracts since by definition
9542 	 * a device contract (an agreement between a process and a device minor
9543 	 * node) can only be created against a device that has minor nodes
9544 	 * i.e is attached. Similarly an LDI open will only succeed if the
9545 	 * dip is attached. We assert below that the dip is not attached.
9546 	 */
9547 	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
9548 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9549 	ret = set_infant_dip_offline(dip, path);
9550 	ASSERT(ret == DDI_SUCCESS);
9551 	kmem_free(path, MAXPATHLEN);
9552 
9553 	return (rv);
9554 fail:
9555 	(void) ndi_devi_free(dip);
9556 	kmem_free(nbuf, OBP_MAXDRVNAME);
9557 	return (DDI_WALK_ERROR);
9558 }
9559 
9560 static int
create_sid_branch(dev_info_t * pdip,devi_branch_t * bp,dev_info_t ** dipp,uint_t flags)9561 create_sid_branch(
9562 	dev_info_t	*pdip,
9563 	devi_branch_t	*bp,
9564 	dev_info_t	**dipp,
9565 	uint_t		flags)
9566 {
9567 	int		rv = 0, state = DDI_WALK_CONTINUE;
9568 	dev_info_t	*rdip;
9569 
9570 	while (state == DDI_WALK_CONTINUE) {
9571 		ndi_devi_enter(pdip);
9572 
9573 		state = sid_node_create(pdip, bp, &rdip);
9574 		if (rdip == NULL) {
9575 			ndi_devi_exit(pdip);
9576 			ASSERT(state == DDI_WALK_ERROR);
9577 			break;
9578 		}
9579 
9580 		e_ddi_branch_hold(rdip);
9581 
9582 		ndi_devi_exit(pdip);
9583 
9584 		if (flags & DEVI_BRANCH_CONFIGURE) {
9585 			int error = e_ddi_branch_configure(rdip, dipp, 0);
9586 			if (error && rv == 0)
9587 				rv = error;
9588 		}
9589 
9590 		/*
9591 		 * devi_branch_callback() is optional
9592 		 */
9593 		if (bp->devi_branch_callback)
9594 			bp->devi_branch_callback(rdip, bp->arg, 0);
9595 	}
9596 
9597 	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);
9598 
9599 	return (state == DDI_WALK_ERROR ? EIO : rv);
9600 }
9601 
9602 int
e_ddi_branch_create(dev_info_t * pdip,devi_branch_t * bp,dev_info_t ** dipp,uint_t flags)9603 e_ddi_branch_create(
9604 	dev_info_t	*pdip,
9605 	devi_branch_t	*bp,
9606 	dev_info_t	**dipp,
9607 	uint_t		flags)
9608 {
9609 	int prom_devi, sid_devi, error;
9610 
9611 	if (pdip == NULL || bp == NULL || bp->type == 0)
9612 		return (EINVAL);
9613 
9614 	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9615 	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9616 
9617 	if (prom_devi && bp->create.prom_branch_select == NULL)
9618 		return (EINVAL);
9619 	else if (sid_devi && bp->create.sid_branch_create == NULL)
9620 		return (EINVAL);
9621 	else if (!prom_devi && !sid_devi)
9622 		return (EINVAL);
9623 
9624 	if (flags & DEVI_BRANCH_EVENT)
9625 		return (EINVAL);
9626 
9627 	if (prom_devi) {
9628 		struct pta pta = {0};
9629 
9630 		pta.pdip = pdip;
9631 		pta.bp = bp;
9632 		pta.flags = flags;
9633 
9634 		error = prom_tree_access(create_prom_branch, &pta, NULL);
9635 
9636 		if (dipp)
9637 			*dipp = pta.fdip;
9638 		else if (pta.fdip)
9639 			ndi_rele_devi(pta.fdip);
9640 	} else {
9641 		error = create_sid_branch(pdip, bp, dipp, flags);
9642 	}
9643 
9644 	return (error);
9645 }
9646 
9647 int
e_ddi_branch_configure(dev_info_t * rdip,dev_info_t ** dipp,uint_t flags)9648 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9649 {
9650 	int		rv;
9651 	char		*devnm;
9652 	dev_info_t	*pdip;
9653 
9654 	if (dipp)
9655 		*dipp = NULL;
9656 
9657 	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9658 		return (EINVAL);
9659 
9660 	pdip = ddi_get_parent(rdip);
9661 
9662 	ndi_hold_devi(pdip);
9663 
9664 	if (!e_ddi_branch_held(rdip)) {
9665 		ndi_rele_devi(pdip);
9666 		cmn_err(CE_WARN, "e_ddi_branch_configure: "
9667 		    "dip(%p) not held", (void *)rdip);
9668 		return (EINVAL);
9669 	}
9670 
9671 	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9672 		/*
9673 		 * First attempt to bind a driver. If we fail, return
9674 		 * success (On some platforms, dips for some device
9675 		 * types (CPUs) may not have a driver)
9676 		 */
9677 		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9678 			ndi_rele_devi(pdip);
9679 			return (0);
9680 		}
9681 
9682 		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9683 			rv = NDI_FAILURE;
9684 			goto out;
9685 		}
9686 	}
9687 
9688 	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
9689 
9690 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9691 
9692 	(void) ddi_deviname(rdip, devnm);
9693 
9694 	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
9695 	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
9696 		/* release hold from ndi_devi_config_one() */
9697 		ndi_rele_devi(rdip);
9698 	}
9699 
9700 	kmem_free(devnm, MAXNAMELEN + 1);
9701 out:
9702 	if (rv != NDI_SUCCESS && dipp && rdip) {
9703 		ndi_hold_devi(rdip);
9704 		*dipp = rdip;
9705 	}
9706 	ndi_rele_devi(pdip);
9707 	return (ndi2errno(rv));
9708 }
9709 
9710 void
e_ddi_branch_hold(dev_info_t * rdip)9711 e_ddi_branch_hold(dev_info_t *rdip)
9712 {
9713 	if (e_ddi_branch_held(rdip)) {
9714 		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9715 		return;
9716 	}
9717 
9718 	mutex_enter(&DEVI(rdip)->devi_lock);
9719 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9720 		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9721 		DEVI(rdip)->devi_ref++;
9722 	}
9723 	ASSERT(DEVI(rdip)->devi_ref > 0);
9724 	mutex_exit(&DEVI(rdip)->devi_lock);
9725 }
9726 
9727 int
e_ddi_branch_held(dev_info_t * rdip)9728 e_ddi_branch_held(dev_info_t *rdip)
9729 {
9730 	int rv = 0;
9731 
9732 	mutex_enter(&DEVI(rdip)->devi_lock);
9733 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9734 	    DEVI(rdip)->devi_ref > 0) {
9735 		rv = 1;
9736 	}
9737 	mutex_exit(&DEVI(rdip)->devi_lock);
9738 
9739 	return (rv);
9740 }
9741 
9742 void
e_ddi_branch_rele(dev_info_t * rdip)9743 e_ddi_branch_rele(dev_info_t *rdip)
9744 {
9745 	mutex_enter(&DEVI(rdip)->devi_lock);
9746 	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9747 	DEVI(rdip)->devi_ref--;
9748 	mutex_exit(&DEVI(rdip)->devi_lock);
9749 }
9750 
9751 int
e_ddi_branch_unconfigure(dev_info_t * rdip,dev_info_t ** dipp,uint_t flags)9752 e_ddi_branch_unconfigure(
9753 	dev_info_t *rdip,
9754 	dev_info_t **dipp,
9755 	uint_t flags)
9756 {
9757 	int	rv;
9758 	int	destroy;
9759 	char	*devnm;
9760 	uint_t	nflags;
9761 	dev_info_t *pdip;
9762 
9763 	if (dipp)
9764 		*dipp = NULL;
9765 
9766 	if (rdip == NULL)
9767 		return (EINVAL);
9768 
9769 	pdip = ddi_get_parent(rdip);
9770 
9771 	ASSERT(pdip);
9772 
9773 	/*
9774 	 * Check if caller holds pdip busy - can cause deadlocks during
9775 	 * devfs_clean()
9776 	 */
9777 	if (DEVI_BUSY_OWNED(pdip)) {
9778 		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9779 		    " devinfo node(%p) is busy held", (void *)pdip);
9780 		return (EINVAL);
9781 	}
9782 
9783 	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9784 
9785 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9786 
9787 	ndi_devi_enter(pdip);
9788 	(void) ddi_deviname(rdip, devnm);
9789 	ndi_devi_exit(pdip);
9790 
9791 	/*
9792 	 * ddi_deviname() returns a component name with / prepended.
9793 	 */
9794 	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9795 
9796 	ndi_devi_enter(pdip);
9797 
9798 	/*
9799 	 * Recreate device name as it may have changed state (init/uninit)
9800 	 * when parent busy lock was dropped for devfs_clean()
9801 	 */
9802 	(void) ddi_deviname(rdip, devnm);
9803 
9804 	if (!e_ddi_branch_held(rdip)) {
9805 		kmem_free(devnm, MAXNAMELEN + 1);
9806 		ndi_devi_exit(pdip);
9807 		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9808 		    destroy ? "destroy" : "unconfigure", (void *)rdip);
9809 		return (EINVAL);
9810 	}
9811 
9812 	/*
9813 	 * Release hold on the branch. This is ok since we are holding the
9814 	 * parent busy. If rdip is not removed, we must do a hold on the
9815 	 * branch before returning.
9816 	 */
9817 	e_ddi_branch_rele(rdip);
9818 
9819 	nflags = NDI_DEVI_OFFLINE;
9820 	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9821 		nflags |= NDI_DEVI_REMOVE;
9822 		destroy = 1;
9823 	} else {
9824 		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
9825 	}
9826 
9827 	if (flags & DEVI_BRANCH_EVENT)
9828 		nflags |= NDI_POST_EVENT;
9829 
9830 	if (i_ddi_devi_attached(pdip) &&
9831 	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9832 		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9833 	} else {
9834 		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9835 		if (rv == NDI_SUCCESS) {
9836 			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9837 			rv = ndi_devi_offline(rdip, nflags);
9838 		}
9839 	}
9840 
9841 	if (!destroy || rv != NDI_SUCCESS) {
9842 		/* The dip still exists, so do a hold */
9843 		e_ddi_branch_hold(rdip);
9844 	}
9845 
9846 	kmem_free(devnm, MAXNAMELEN + 1);
9847 	ndi_devi_exit(pdip);
9848 	return (ndi2errno(rv));
9849 }
9850 
9851 int
e_ddi_branch_destroy(dev_info_t * rdip,dev_info_t ** dipp,uint_t flag)9852 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
9853 {
9854 	return (e_ddi_branch_unconfigure(rdip, dipp,
9855 	    flag|DEVI_BRANCH_DESTROY));
9856 }
9857 
9858 /*
9859  * Number of chains for hash table
9860  */
9861 #define	NUMCHAINS	17
9862 
9863 /*
9864  * Devinfo busy arg
9865  */
9866 struct devi_busy {
9867 	int dv_total;
9868 	int s_total;
9869 	mod_hash_t *dv_hash;
9870 	mod_hash_t *s_hash;
9871 	int (*callback)(dev_info_t *, void *, uint_t);
9872 	void *arg;
9873 };
9874 
9875 static int
visit_dip(dev_info_t * dip,void * arg)9876 visit_dip(dev_info_t *dip, void *arg)
9877 {
9878 	uintptr_t sbusy, dvbusy, ref;
9879 	struct devi_busy *bsp = arg;
9880 
9881 	ASSERT(bsp->callback);
9882 
9883 	/*
9884 	 * A dip cannot be busy if its reference count is 0
9885 	 */
9886 	if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9887 		return (bsp->callback(dip, bsp->arg, 0));
9888 	}
9889 
9890 	if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9891 		dvbusy = 0;
9892 
9893 	/*
9894 	 * To catch device opens currently maintained on specfs common snodes.
9895 	 */
9896 	if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9897 		sbusy = 0;
9898 
9899 #ifdef	DEBUG
9900 	if (ref < sbusy || ref < dvbusy) {
9901 		cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9902 		    "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9903 	}
9904 #endif
9905 
9906 	dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9907 
9908 	return (bsp->callback(dip, bsp->arg, dvbusy));
9909 }
9910 
9911 static int
visit_snode(struct snode * sp,void * arg)9912 visit_snode(struct snode *sp, void *arg)
9913 {
9914 	uintptr_t sbusy;
9915 	dev_info_t *dip;
9916 	int count;
9917 	struct devi_busy *bsp = arg;
9918 
9919 	ASSERT(sp);
9920 
9921 	/*
9922 	 * The stable lock is held. This prevents
9923 	 * the snode and its associated dip from
9924 	 * going away.
9925 	 */
9926 	dip = NULL;
9927 	count = spec_devi_open_count(sp, &dip);
9928 
9929 	if (count <= 0)
9930 		return (DDI_WALK_CONTINUE);
9931 
9932 	ASSERT(dip);
9933 
9934 	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9935 		sbusy = count;
9936 	else
9937 		sbusy += count;
9938 
9939 	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9940 		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9941 		    "sbusy = %lu", "e_ddi_branch_referenced",
9942 		    (void *)dip, sbusy);
9943 	}
9944 
9945 	bsp->s_total += count;
9946 
9947 	return (DDI_WALK_CONTINUE);
9948 }
9949 
9950 static void
visit_dvnode(struct dv_node * dv,void * arg)9951 visit_dvnode(struct dv_node *dv, void *arg)
9952 {
9953 	uintptr_t dvbusy;
9954 	uint_t count;
9955 	struct vnode *vp;
9956 	struct devi_busy *bsp = arg;
9957 
9958 	ASSERT(dv && dv->dv_devi);
9959 
9960 	vp = DVTOV(dv);
9961 
9962 	mutex_enter(&vp->v_lock);
9963 	count = vp->v_count;
9964 	mutex_exit(&vp->v_lock);
9965 
9966 	if (!count)
9967 		return;
9968 
9969 	if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9970 	    (mod_hash_val_t *)&dvbusy))
9971 		dvbusy = count;
9972 	else
9973 		dvbusy += count;
9974 
9975 	if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9976 	    (mod_hash_val_t)dvbusy)) {
9977 		cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9978 		    "dvbusy=%lu", "e_ddi_branch_referenced",
9979 		    (void *)dv->dv_devi, dvbusy);
9980 	}
9981 
9982 	bsp->dv_total += count;
9983 }
9984 
9985 /*
9986  * Returns reference count on success or -1 on failure.
9987  */
9988 int
e_ddi_branch_referenced(dev_info_t * rdip,int (* callback)(dev_info_t * dip,void * arg,uint_t ref),void * arg)9989 e_ddi_branch_referenced(
9990 	dev_info_t *rdip,
9991 	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
9992 	void *arg)
9993 {
9994 	char *path;
9995 	dev_info_t *pdip;
9996 	struct devi_busy bsa = {0};
9997 
9998 	ASSERT(rdip);
9999 
10000 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
10001 
10002 	ndi_hold_devi(rdip);
10003 
10004 	pdip = ddi_get_parent(rdip);
10005 
10006 	ASSERT(pdip);
10007 
10008 	/*
10009 	 * Check if caller holds pdip busy - can cause deadlocks during
10010 	 * devfs_walk()
10011 	 */
10012 	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
10013 		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
10014 		    "devinfo branch(%p) not held or parent busy held",
10015 		    (void *)rdip);
10016 		ndi_rele_devi(rdip);
10017 		kmem_free(path, MAXPATHLEN);
10018 		return (-1);
10019 	}
10020 
10021 	ndi_devi_enter(pdip);
10022 	(void) ddi_pathname(rdip, path);
10023 	ndi_devi_exit(pdip);
10024 
10025 	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
10026 	    mod_hash_null_valdtor, sizeof (struct dev_info));
10027 
10028 	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
10029 	    mod_hash_null_valdtor, sizeof (struct snode));
10030 
10031 	if (devfs_walk(path, visit_dvnode, &bsa)) {
10032 		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
10033 		    "devfs walk failed for: %s", path);
10034 		kmem_free(path, MAXPATHLEN);
10035 		bsa.s_total = bsa.dv_total = -1;
10036 		goto out;
10037 	}
10038 
10039 	kmem_free(path, MAXPATHLEN);
10040 
10041 	/*
10042 	 * Walk the snode table to detect device opens, which are currently
10043 	 * maintained on specfs common snodes.
10044 	 */
10045 	spec_snode_walk(visit_snode, &bsa);
10046 
10047 	if (callback == NULL)
10048 		goto out;
10049 
10050 	bsa.callback = callback;
10051 	bsa.arg = arg;
10052 
10053 	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
10054 		ndi_devi_enter(rdip);
10055 		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
10056 		ndi_devi_exit(rdip);
10057 	}
10058 
10059 out:
10060 	ndi_rele_devi(rdip);
10061 	mod_hash_destroy_ptrhash(bsa.s_hash);
10062 	mod_hash_destroy_ptrhash(bsa.dv_hash);
10063 	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
10064 }
10065