1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#include <sys/types.h>
27#include <sys/param.h>
28#include <sys/systm.h>
29#include <sys/buf.h>
30#include <sys/uio.h>
31#include <sys/cred.h>
32#include <sys/poll.h>
33#include <sys/mman.h>
34#include <sys/kmem.h>
35#include <sys/model.h>
36#include <sys/file.h>
37#include <sys/proc.h>
38#include <sys/open.h>
39#include <sys/user.h>
40#include <sys/t_lock.h>
41#include <sys/vm.h>
42#include <sys/stat.h>
43#include <vm/hat.h>
44#include <vm/seg.h>
45#include <vm/as.h>
46#include <sys/cmn_err.h>
47#include <sys/debug.h>
48#include <sys/avintr.h>
49#include <sys/autoconf.h>
50#include <sys/sunddi.h>
51#include <sys/esunddi.h>
52#include <sys/sunndi.h>
53#include <sys/ddi.h>
54#include <sys/kstat.h>
55#include <sys/conf.h>
56#include <sys/ddi_impldefs.h>	/* include implementation structure defs */
57#include <sys/ndi_impldefs.h>
58#include <sys/hwconf.h>
59#include <sys/pathname.h>
60#include <sys/modctl.h>
61#include <sys/epm.h>
62#include <sys/devctl.h>
63#include <sys/callb.h>
64#include <sys/bootconf.h>
65#include <sys/dacf_impl.h>
66#include <sys/nvpair.h>
67#include <sys/sunmdi.h>
68#include <sys/fs/dv_node.h>
69#include <sys/sunldi_impl.h>
70
71#ifdef __sparc
72#include <sys/archsystm.h>	/* getpil/setpil */
73#include <sys/membar.h>		/* membar_sync */
74#endif
75
76/*
77 * ndi property handling
78 */
79int
80ndi_prop_update_int(dev_t match_dev, dev_info_t *dip,
81    char *name, int data)
82{
83	return (ddi_prop_update_common(match_dev, dip,
84	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT | DDI_PROP_DONTSLEEP,
85	    name, &data, 1, ddi_prop_fm_encode_ints));
86}
87
88int
89ndi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
90    char *name, int64_t data)
91{
92	return (ddi_prop_update_common(match_dev, dip,
93	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT64 | DDI_PROP_DONTSLEEP,
94	    name, &data, 1, ddi_prop_fm_encode_int64));
95}
96
97int
98ndi_prop_create_boolean(dev_t match_dev, dev_info_t *dip,
99    char *name)
100{
101	return (ddi_prop_update_common(match_dev, dip,
102	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_ANY | DDI_PROP_DONTSLEEP,
103	    name, NULL, 0, ddi_prop_fm_encode_bytes));
104}
105
106int
107ndi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
108    char *name, int *data, uint_t nelements)
109{
110	return (ddi_prop_update_common(match_dev, dip,
111	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT | DDI_PROP_DONTSLEEP,
112	    name, data, nelements, ddi_prop_fm_encode_ints));
113}
114
115int
116ndi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
117    char *name, int64_t *data, uint_t nelements)
118{
119	return (ddi_prop_update_common(match_dev, dip,
120	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT64 | DDI_PROP_DONTSLEEP,
121	    name, data, nelements, ddi_prop_fm_encode_int64));
122}
123
124int
125ndi_prop_update_string(dev_t match_dev, dev_info_t *dip,
126    char *name, char *data)
127{
128	return (ddi_prop_update_common(match_dev, dip,
129	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_STRING | DDI_PROP_DONTSLEEP,
130	    name, &data, 1, ddi_prop_fm_encode_string));
131}
132
133int
134ndi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
135    char *name, char **data, uint_t nelements)
136{
137	return (ddi_prop_update_common(match_dev, dip,
138	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_STRING | DDI_PROP_DONTSLEEP,
139	    name, data, nelements,
140	    ddi_prop_fm_encode_strings));
141}
142
143int
144ndi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
145    char *name, uchar_t *data, uint_t nelements)
146{
147	if (nelements == 0)
148		return (DDI_PROP_INVAL_ARG);
149
150	return (ddi_prop_update_common(match_dev, dip,
151	    DDI_PROP_HW_DEF | DDI_PROP_TYPE_BYTE | DDI_PROP_DONTSLEEP,
152	    name, data, nelements, ddi_prop_fm_encode_bytes));
153}
154
155int
156ndi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
157{
158	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_HW_DEF));
159}
160
/*
 * Remove all hardware-defined properties from "dip", first clearing any
 * dynamic-property parent association recorded for the node.
 */
void
ndi_prop_remove_all(dev_info_t *dip)
{
	/* detach the dynamic-property parent before dropping the props */
	i_ddi_prop_dyn_parent_set(dip, NULL);
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_HW_DEF);
}
167
168/*
169 * Post an event notification to nexus driver responsible for handling
170 * the event.  The responsible nexus is defined in the cookie passed in as
171 * the third parameter.
172 * The dip parameter is an artifact of an older implementation in which all
173 * requests to remove an eventcall would bubble up the tree.  Today, this
174 * parameter is ignored.
175 * Input Parameters:
176 *	dip	- Ignored.
177 *	rdip	- device driver posting the event
178 *	cookie	- valid ddi_eventcookie_t, obtained by caller prior to
179 *		  invocation of this routine
180 *	impl_data - used by framework
181 */
182/*ARGSUSED*/
183int
184ndi_post_event(dev_info_t *dip, dev_info_t *rdip,
185		ddi_eventcookie_t cookie, void *impl_data)
186{
187	dev_info_t *ddip;
188
189	ASSERT(cookie);
190	ddip = NDI_EVENT_DDIP(cookie);
191
192	/*
193	 * perform sanity checks.  These conditions should never be true.
194	 */
195
196	ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops != NULL);
197	ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->busops_rev >= BUSO_REV_6);
198	ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->bus_post_event != NULL);
199
200	/*
201	 * post the event to the responsible ancestor
202	 */
203	return ((*(DEVI(ddip)->devi_ops->devo_bus_ops->bus_post_event))
204	    (ddip, rdip, cookie, impl_data));
205}
206
207/*
208 * Calls the bus nexus driver's implementation of the
209 * (*bus_remove_eventcall)() interface.
210 */
211int
212ndi_busop_remove_eventcall(dev_info_t *ddip, ddi_callback_id_t id)
213{
214
215	ASSERT(id);
216	/* check for a correct revno before calling up the device tree. */
217	ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops != NULL);
218	ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->busops_rev >= BUSO_REV_6);
219
220	if (DEVI(ddip)->devi_ops->devo_bus_ops->bus_remove_eventcall == NULL)
221		return (DDI_FAILURE);
222
223	/*
224	 * request responsible nexus to remove the eventcall
225	 */
226	return ((*(DEVI(ddip)->devi_ops->devo_bus_ops->bus_remove_eventcall))
227	    (ddip, id));
228}
229
230/*
231 * Calls the bus nexus driver's implementation of the
232 * (*bus_add_eventcall)() interface.  The dip parameter is an
233 * artifact of an older implementation in which all requests to
234 * add an eventcall would bubble up the tree.  Today, this parameter is
235 * ignored.
236 */
237/*ARGSUSED*/
238int
239ndi_busop_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
240		ddi_eventcookie_t cookie, void (*callback)(), void *arg,
241		ddi_callback_id_t *cb_id)
242{
243	dev_info_t *ddip = (dev_info_t *)NDI_EVENT_DDIP(cookie);
244
245	/*
246	 * check for a correct revno before calling up the device tree.
247	 */
248	ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops != NULL);
249	ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->busops_rev >= BUSO_REV_6);
250
251	if (DEVI(ddip)->devi_ops->devo_bus_ops->bus_add_eventcall == NULL)
252		return (DDI_FAILURE);
253
254	/*
255	 * request responsible ancestor to add the eventcall
256	 */
257	return ((*(DEVI(ddip)->devi_ops->devo_bus_ops->bus_add_eventcall))
258	    (ddip, rdip, cookie, callback, arg, cb_id));
259}
260
261/*
262 * Calls the bus nexus driver's implementation of the
263 * (*bus_get_eventcookie)() interface up the device tree hierarchy.
264 */
265int
266ndi_busop_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
267		ddi_eventcookie_t *event_cookiep)
268{
269	dev_info_t *pdip = (dev_info_t *)DEVI(dip)->devi_parent;
270
271	/* Can not be called from rootnex. */
272	ASSERT(pdip);
273
274	/*
275	 * check for a correct revno before calling up the device tree.
276	 */
277	ASSERT(DEVI(pdip)->devi_ops->devo_bus_ops != NULL);
278
279	if ((DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_6) ||
280	    (DEVI(pdip)->devi_ops->devo_bus_ops->bus_get_eventcookie == NULL)) {
281#ifdef DEBUG
282		if ((DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev >=
283		    BUSO_REV_3) &&
284		    (DEVI(pdip)->devi_ops->devo_bus_ops->bus_get_eventcookie)) {
285			cmn_err(CE_WARN,
286			    "Warning: %s%d busops_rev=%d no longer supported"
287			    " by the NDI event framework.\nBUSO_REV_6 or "
288			    "greater must be used.",
289			    DEVI(pdip)->devi_binding_name,
290			    DEVI(pdip)->devi_instance,
291			    DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev);
292		}
293#endif /* DEBUG */
294
295		return (ndi_busop_get_eventcookie(pdip, rdip, name,
296		    event_cookiep));
297	}
298
299	return ((*(DEVI(pdip)->devi_ops->devo_bus_ops->bus_get_eventcookie))
300	    (pdip, rdip, name, event_cookiep));
301}
302
303/*
304 * Copy in the devctl IOCTL data and return a handle to
305 * the data.
306 */
307int
308ndi_dc_allochdl(void *iocarg, struct devctl_iocdata **rdcp)
309{
310	struct devctl_iocdata *dcp;
311	char *cpybuf;
312
313	ASSERT(rdcp != NULL);
314
315	dcp = kmem_zalloc(sizeof (*dcp), KM_SLEEP);
316
317	if (get_udatamodel() == DATAMODEL_NATIVE) {
318		if (copyin(iocarg, dcp, sizeof (*dcp)) != 0) {
319			kmem_free(dcp, sizeof (*dcp));
320			return (NDI_FAULT);
321		}
322	}
323#ifdef _SYSCALL32_IMPL
324	else {
325		struct devctl_iocdata32 dcp32;
326
327		if (copyin(iocarg, &dcp32, sizeof (dcp32)) != 0) {
328			kmem_free(dcp, sizeof (*dcp));
329			return (NDI_FAULT);
330		}
331		dcp->cmd = (uint_t)dcp32.cmd;
332		dcp->flags = (uint_t)dcp32.flags;
333		dcp->cpyout_buf = (uint_t *)(uintptr_t)dcp32.cpyout_buf;
334		dcp->nvl_user = (nvlist_t *)(uintptr_t)dcp32.nvl_user;
335		dcp->nvl_usersz = (size_t)dcp32.nvl_usersz;
336		dcp->c_nodename = (char *)(uintptr_t)dcp32.c_nodename;
337		dcp->c_unitaddr = (char *)(uintptr_t)dcp32.c_unitaddr;
338	}
339#endif
340	if (dcp->c_nodename != NULL) {
341		cpybuf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
342		if (copyinstr(dcp->c_nodename, cpybuf, MAXNAMELEN, 0) != 0) {
343			kmem_free(cpybuf, MAXNAMELEN);
344			kmem_free(dcp, sizeof (*dcp));
345			return (NDI_FAULT);
346		}
347		cpybuf[MAXNAMELEN - 1] = '\0';
348		dcp->c_nodename = cpybuf;
349	}
350
351	if (dcp->c_unitaddr != NULL) {
352		cpybuf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
353		if (copyinstr(dcp->c_unitaddr, cpybuf, MAXNAMELEN, 0) != 0) {
354			kmem_free(cpybuf, MAXNAMELEN);
355			if (dcp->c_nodename != NULL)
356				kmem_free(dcp->c_nodename, MAXNAMELEN);
357			kmem_free(dcp, sizeof (*dcp));
358			return (NDI_FAULT);
359		}
360		cpybuf[MAXNAMELEN - 1] = '\0';
361		dcp->c_unitaddr = cpybuf;
362	}
363
364	/*
365	 * copyin and unpack a user defined nvlist if one was passed
366	 */
367	if (dcp->nvl_user != NULL) {
368		if ((dcp->nvl_usersz == 0) ||
369		    (dcp->nvl_usersz > DEVCTL_MAX_NVL_USERSZ)) {
370			if (dcp->c_nodename != NULL)
371				kmem_free(dcp->c_nodename, MAXNAMELEN);
372			if (dcp->c_unitaddr != NULL)
373				kmem_free(dcp->c_unitaddr, MAXNAMELEN);
374			kmem_free(dcp, sizeof (*dcp));
375			return (NDI_FAILURE);
376		}
377		cpybuf = kmem_alloc(dcp->nvl_usersz, KM_SLEEP);
378		if (copyin(dcp->nvl_user, cpybuf, dcp->nvl_usersz) != 0) {
379			kmem_free(cpybuf, dcp->nvl_usersz);
380			if (dcp->c_nodename != NULL)
381				kmem_free(dcp->c_nodename, MAXNAMELEN);
382			if (dcp->c_unitaddr != NULL)
383				kmem_free(dcp->c_unitaddr, MAXNAMELEN);
384			kmem_free(dcp, sizeof (*dcp));
385			return (NDI_FAULT);
386		}
387
388		if (nvlist_unpack(cpybuf, dcp->nvl_usersz, &dcp->nvl_user,
389		    KM_SLEEP)) {
390			kmem_free(cpybuf, dcp->nvl_usersz);
391			if (dcp->c_nodename != NULL)
392				kmem_free(dcp->c_nodename, MAXNAMELEN);
393			if (dcp->c_unitaddr != NULL)
394				kmem_free(dcp->c_unitaddr, MAXNAMELEN);
395			kmem_free(dcp, sizeof (*dcp));
396			return (NDI_FAULT);
397		}
398		/*
399		 * free the buffer containing the packed nvlist
400		 */
401		kmem_free(cpybuf, dcp->nvl_usersz);
402
403	}
404
405	*rdcp = dcp;
406	return (NDI_SUCCESS);
407}
408
409/*
410 * free all space allocated to a handle.
411 */
412void
413ndi_dc_freehdl(struct devctl_iocdata *dcp)
414{
415	ASSERT(dcp != NULL);
416
417	if (dcp->c_nodename != NULL)
418		kmem_free(dcp->c_nodename, MAXNAMELEN);
419
420	if (dcp->c_unitaddr != NULL)
421		kmem_free(dcp->c_unitaddr, MAXNAMELEN);
422
423	nvlist_free(dcp->nvl_user);
424
425	kmem_free(dcp, sizeof (*dcp));
426}
427
/*
 * Return the node name from the devctl handle, or NULL if the request
 * supplied none.  The string remains owned by the handle and is freed
 * by ndi_dc_freehdl().
 */
char *
ndi_dc_getname(struct devctl_iocdata *dcp)
{
	ASSERT(dcp != NULL);
	return (dcp->c_nodename);

}
435
/*
 * Return the unit address from the devctl handle, or NULL if the
 * request supplied none.  Owned by the handle; freed by
 * ndi_dc_freehdl().
 */
char *
ndi_dc_getaddr(struct devctl_iocdata *dcp)
{
	ASSERT(dcp != NULL);
	return (dcp->c_unitaddr);
}
442
/*
 * Return the caller-supplied nvlist from the devctl handle (NULL if
 * the request carried none).  Owned by the handle; freed by
 * ndi_dc_freehdl().
 */
nvlist_t *
ndi_dc_get_ap_data(struct devctl_iocdata *dcp)
{
	ASSERT(dcp != NULL);

	return (dcp->nvl_user);
}
450
451/*
452 * Transition the child named by "devname@devaddr" to the online state.
453 * For use by a driver's DEVCTL_DEVICE_ONLINE handler.
454 */
455int
456ndi_devctl_device_online(dev_info_t *dip, struct devctl_iocdata *dcp,
457	uint_t flags)
458{
459	int	rval;
460	char	*name;
461	dev_info_t *rdip;
462
463	if (ndi_dc_getname(dcp) == NULL || ndi_dc_getaddr(dcp) == NULL)
464		return (EINVAL);
465
466	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
467	(void) snprintf(name, MAXNAMELEN, "%s@%s",
468	    ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
469
470	if ((rval = ndi_devi_config_one(dip, name, &rdip,
471	    flags | NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
472		ndi_rele_devi(rdip);
473
474		/*
475		 * Invalidate devfs cached directory contents. For the checks
476		 * in the "if" condition see the comment in ndi_devi_online().
477		 */
478		if (i_ddi_devi_attached(dip) && !DEVI_BUSY_OWNED(dip))
479			(void) devfs_clean(dip, NULL, 0);
480
481	} else if (rval == NDI_BUSY) {
482		rval = EBUSY;
483	} else if (rval == NDI_FAILURE) {
484		rval = EIO;
485	}
486
487	NDI_DEBUG(flags, (CE_CONT, "%s%d: online: %s: %s\n",
488	    ddi_driver_name(dip), ddi_get_instance(dip), name,
489	    ((rval == NDI_SUCCESS) ? "ok" : "failed")));
490
491	kmem_free(name, MAXNAMELEN);
492
493	return (rval);
494}
495
496/*
497 * Transition the child named by "devname@devaddr" to the offline state.
498 * For use by a driver's DEVCTL_DEVICE_OFFLINE handler.
499 */
500int
501ndi_devctl_device_offline(dev_info_t *dip, struct devctl_iocdata *dcp,
502	uint_t flags)
503{
504	int	rval;
505	char	*name;
506
507	if (ndi_dc_getname(dcp) == NULL || ndi_dc_getaddr(dcp) == NULL)
508		return (EINVAL);
509
510	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
511	(void) snprintf(name, MAXNAMELEN, "%s@%s",
512	    ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
513
514	(void) devfs_clean(dip, name, DV_CLEAN_FORCE);
515	rval = ndi_devi_unconfig_one(dip, name, NULL,
516	    flags | NDI_DEVI_OFFLINE);
517
518	if (rval == NDI_BUSY) {
519		rval = EBUSY;
520	} else if (rval == NDI_FAILURE) {
521		rval = EIO;
522	}
523
524	NDI_DEBUG(flags, (CE_CONT, "%s%d: offline: %s: %s\n",
525	    ddi_driver_name(dip), ddi_get_instance(dip), name,
526	    (rval == NDI_SUCCESS) ? "ok" : "failed"));
527
528	kmem_free(name, MAXNAMELEN);
529
530	return (rval);
531}
532
533/*
534 * Remove the child named by "devname@devaddr".
535 * For use by a driver's DEVCTL_DEVICE_REMOVE handler.
536 */
537int
538ndi_devctl_device_remove(dev_info_t *dip, struct devctl_iocdata *dcp,
539	uint_t flags)
540{
541	int	rval;
542	char	*name;
543
544	if (ndi_dc_getname(dcp) == NULL || ndi_dc_getaddr(dcp) == NULL)
545		return (EINVAL);
546
547	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
548	(void) snprintf(name, MAXNAMELEN, "%s@%s",
549	    ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
550
551	(void) devfs_clean(dip, name, DV_CLEAN_FORCE);
552
553	rval = ndi_devi_unconfig_one(dip, name, NULL, flags | NDI_DEVI_REMOVE);
554
555	if (rval == NDI_BUSY) {
556		rval = EBUSY;
557	} else if (rval == NDI_FAILURE) {
558		rval = EIO;
559	}
560
561	NDI_DEBUG(flags, (CE_CONT, "%s%d: remove: %s: %s\n",
562	    ddi_driver_name(dip), ddi_get_instance(dip), name,
563	    (rval == NDI_SUCCESS) ? "ok" : "failed"));
564
565	kmem_free(name, MAXNAMELEN);
566
567	return (rval);
568}
569
570/*
571 * Return devctl state of the child named by "name@addr".
572 * For use by a driver's DEVCTL_DEVICE_GETSTATE handler.
573 */
574int
575ndi_devctl_device_getstate(dev_info_t *parent, struct devctl_iocdata *dcp,
576	uint_t *state)
577{
578	dev_info_t *dip;
579	char *name, *addr;
580	char *devname;
581	int devnamelen;
582	int circ;
583
584	if (parent == NULL ||
585	    ((name = ndi_dc_getname(dcp)) == NULL) ||
586	    ((addr = ndi_dc_getaddr(dcp)) == NULL))
587		return (NDI_FAILURE);
588
589	devnamelen = strlen(name) + strlen(addr) + 2;
590	devname = kmem_alloc(devnamelen, KM_SLEEP);
591	if (strlen(addr) > 0) {
592		(void) snprintf(devname, devnamelen, "%s@%s", name, addr);
593	} else {
594		(void) snprintf(devname, devnamelen, "%s", name);
595	}
596
597	ndi_devi_enter(parent, &circ);
598
599	dip = ndi_devi_findchild(parent, devname);
600	kmem_free(devname, devnamelen);
601
602	if (dip == NULL) {
603		ndi_devi_exit(parent, circ);
604		return (NDI_FAILURE);
605	}
606
607	mutex_enter(&(DEVI(dip)->devi_lock));
608	if (DEVI_IS_DEVICE_OFFLINE(dip)) {
609		*state = DEVICE_OFFLINE;
610	} else if (DEVI_IS_DEVICE_DOWN(dip)) {
611		*state = DEVICE_DOWN;
612	} else {
613		*state = DEVICE_ONLINE;
614		if (devi_stillreferenced(dip) == DEVI_REFERENCED)
615			*state |= DEVICE_BUSY;
616	}
617
618	mutex_exit(&(DEVI(dip)->devi_lock));
619	ndi_devi_exit(parent, circ);
620
621	return (NDI_SUCCESS);
622}
623
624/*
625 * return the current state of the device "dip"
626 *
627 * recommend using ndi_devctl_ioctl() or
628 * ndi_devctl_device_getstate() instead
629 */
630int
631ndi_dc_return_dev_state(dev_info_t *dip, struct devctl_iocdata *dcp)
632{
633	dev_info_t *pdip;
634	uint_t devstate = 0;
635	int circ;
636
637	if ((dip == NULL) || (dcp == NULL))
638		return (NDI_FAILURE);
639
640	pdip = ddi_get_parent(dip);
641
642	ndi_devi_enter(pdip, &circ);
643	mutex_enter(&(DEVI(dip)->devi_lock));
644	if (DEVI_IS_DEVICE_OFFLINE(dip)) {
645		devstate = DEVICE_OFFLINE;
646	} else if (DEVI_IS_DEVICE_DOWN(dip)) {
647		devstate = DEVICE_DOWN;
648	} else {
649		devstate = DEVICE_ONLINE;
650		if (devi_stillreferenced(dip) == DEVI_REFERENCED)
651			devstate |= DEVICE_BUSY;
652	}
653
654	mutex_exit(&(DEVI(dip)->devi_lock));
655	ndi_devi_exit(pdip, circ);
656
657	if (copyout(&devstate, dcp->cpyout_buf, sizeof (uint_t)) != 0)
658		return (NDI_FAULT);
659
660	return (NDI_SUCCESS);
661}
662
663/*
664 * Return device's bus state
665 * For use by a driver's DEVCTL_BUS_GETSTATE handler.
666 */
667int
668ndi_devctl_bus_getstate(dev_info_t *dip, struct devctl_iocdata *dcp,
669	uint_t *state)
670{
671	if ((dip == NULL) || (dcp == NULL))
672		return (NDI_FAILURE);
673
674	return (ndi_get_bus_state(dip, state));
675}
676
677/*
678 * Generic devctl ioctl handler
679 */
680int
681ndi_devctl_ioctl(dev_info_t *dip, int cmd, intptr_t arg, int mode, uint_t flags)
682{
683	_NOTE(ARGUNUSED(mode))
684	struct devctl_iocdata *dcp;
685	uint_t state;
686	int rval = ENOTTY;
687
688	/*
689	 * read devctl ioctl data
690	 */
691	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
692		return (EFAULT);
693
694	switch (cmd) {
695
696	case DEVCTL_BUS_GETSTATE:
697		rval = ndi_devctl_bus_getstate(dip, dcp, &state);
698		if (rval == NDI_SUCCESS) {
699			if (copyout(&state, dcp->cpyout_buf,
700			    sizeof (uint_t)) != 0)
701				rval = NDI_FAULT;
702		}
703		break;
704
705	case DEVCTL_DEVICE_ONLINE:
706		rval = ndi_devctl_device_online(dip, dcp, flags);
707		break;
708
709	case DEVCTL_DEVICE_OFFLINE:
710		rval = ndi_devctl_device_offline(dip, dcp, flags);
711		break;
712
713	case DEVCTL_DEVICE_GETSTATE:
714		rval = ndi_devctl_device_getstate(dip, dcp, &state);
715		if (rval == NDI_SUCCESS) {
716			if (copyout(&state, dcp->cpyout_buf,
717			    sizeof (uint_t)) != 0)
718				rval = NDI_FAULT;
719		}
720		break;
721
722	case DEVCTL_DEVICE_REMOVE:
723		rval = ndi_devctl_device_remove(dip, dcp, flags);
724		break;
725
726	case DEVCTL_BUS_DEV_CREATE:
727		rval = ndi_dc_devi_create(dcp, dip, 0, NULL);
728		break;
729
730	/*
731	 * ioctls for which a generic implementation makes no sense
732	 */
733	case DEVCTL_BUS_RESET:
734	case DEVCTL_BUS_RESETALL:
735	case DEVCTL_DEVICE_RESET:
736	case DEVCTL_AP_CONNECT:
737	case DEVCTL_AP_DISCONNECT:
738	case DEVCTL_AP_INSERT:
739	case DEVCTL_AP_REMOVE:
740	case DEVCTL_AP_CONFIGURE:
741	case DEVCTL_AP_UNCONFIGURE:
742	case DEVCTL_AP_GETSTATE:
743	case DEVCTL_AP_CONTROL:
744	case DEVCTL_BUS_QUIESCE:
745	case DEVCTL_BUS_UNQUIESCE:
746		rval = ENOTSUP;
747		break;
748	}
749
750	ndi_dc_freehdl(dcp);
751	return (rval);
752}
753
754/*
755 * Copyout the state of the Attachment Point "ap" to the requesting
756 * user process.
757 */
758int
759ndi_dc_return_ap_state(devctl_ap_state_t *ap, struct devctl_iocdata *dcp)
760{
761	if ((ap == NULL) || (dcp == NULL))
762		return (NDI_FAILURE);
763
764
765	if (get_udatamodel() == DATAMODEL_NATIVE) {
766		if (copyout(ap, dcp->cpyout_buf,
767		    sizeof (devctl_ap_state_t)) != 0)
768			return (NDI_FAULT);
769	}
770#ifdef _SYSCALL32_IMPL
771	else {
772		struct devctl_ap_state32 ap_state32;
773
774		ap_state32.ap_rstate = ap->ap_rstate;
775		ap_state32.ap_ostate = ap->ap_ostate;
776		ap_state32.ap_condition = ap->ap_condition;
777		ap_state32.ap_error_code = ap->ap_error_code;
778		ap_state32.ap_in_transition = ap->ap_in_transition;
779		ap_state32.ap_last_change = (time32_t)ap->ap_last_change;
780		if (copyout(&ap_state32, dcp->cpyout_buf,
781		    sizeof (devctl_ap_state32_t)) != 0)
782			return (NDI_FAULT);
783	}
784#endif
785
786	return (NDI_SUCCESS);
787}
788
789/*
790 * Copyout the bus state of the bus nexus device "dip" to the requesting
791 * user process.
792 */
793int
794ndi_dc_return_bus_state(dev_info_t *dip, struct devctl_iocdata *dcp)
795{
796	uint_t devstate = 0;
797
798	if ((dip == NULL) || (dcp == NULL))
799		return (NDI_FAILURE);
800
801	if (ndi_get_bus_state(dip, &devstate) != NDI_SUCCESS)
802		return (NDI_FAILURE);
803
804	if (copyout(&devstate, dcp->cpyout_buf, sizeof (uint_t)) != 0)
805		return (NDI_FAULT);
806
807	return (NDI_SUCCESS);
808}
809
810static int
811i_dc_devi_create(struct devctl_iocdata *, dev_info_t *, dev_info_t **);
812
813/*
814 * create a child device node given the property definitions
815 * supplied by the userland process
816 */
817int
818ndi_dc_devi_create(struct devctl_iocdata *dcp, dev_info_t *pdip, int flags,
819    dev_info_t **rdip)
820{
821	dev_info_t *cdip;
822	int rv, circular = 0;
823	char devnm[MAXNAMELEN];
824	int nmlen;
825
826	/*
827	 * The child device may have been pre-constructed by an earlier
828	 * call to this function with the flag DEVCTL_CONSTRUCT set.
829	 */
830
831	if ((cdip = (rdip != NULL) ? *rdip : NULL) == NULL)
832		if ((rv = i_dc_devi_create(dcp, pdip, &cdip)) != 0)
833			return (rv);
834
835	ASSERT(cdip != NULL);
836
837	/*
838	 * Return the device node partially constructed if the
839	 * DEVCTL_CONSTRUCT flag is set.
840	 */
841	if (flags & DEVCTL_CONSTRUCT) {
842		if (rdip == NULL) {
843			(void) ndi_devi_free(cdip);
844			return (EINVAL);
845		}
846		*rdip = cdip;
847		return (0);
848	}
849
850	/*
851	 * Bring the node up to a named but OFFLINE state.  The calling
852	 * application will need to manage the node from here on.
853	 */
854	if (dcp->flags & DEVCTL_OFFLINE) {
855		/*
856		 * In the unlikely event that the dip was somehow attached by
857		 * the userland process (and device contracts or LDI opens
858		 * were registered against the dip) after it was created by
859		 * a previous DEVCTL_CONSTRUCT call, we start notify
860		 * proceedings on this dip. Note that we don't need to
861		 * return the dip after a failure of the notify since
862		 * for a contract or LDI handle to be created the dip was
863		 * already available to the user.
864		 */
865		if (e_ddi_offline_notify(cdip) == DDI_FAILURE) {
866			return (EBUSY);
867		}
868
869		/*
870		 * hand set the OFFLINE flag to prevent any asynchronous
871		 * autoconfiguration operations from attaching this node.
872		 */
873		mutex_enter(&(DEVI(cdip)->devi_lock));
874		DEVI_SET_DEVICE_OFFLINE(cdip);
875		mutex_exit(&(DEVI(cdip)->devi_lock));
876
877		e_ddi_offline_finalize(cdip, DDI_SUCCESS);
878
879		rv = ndi_devi_bind_driver(cdip, flags);
880		if (rv != NDI_SUCCESS) {
881			(void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE);
882			return (ENXIO);
883		}
884
885		/*
886		 * remove the dev_info node if it failed to bind to a
887		 * driver above.
888		 */
889		if (i_ddi_node_state(cdip) < DS_BOUND) {
890			(void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE);
891			return (ENXIO);
892		}
893
894		/*
895		 * add the node to the per-driver list and INITCHILD it
896		 * to give it a name.
897		 */
898		ndi_devi_enter(pdip, &circular);
899		if ((rv = ddi_initchild(pdip, cdip)) != DDI_SUCCESS) {
900			(void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE);
901			ndi_devi_exit(pdip, circular);
902			return (EINVAL);
903		}
904		ndi_devi_exit(pdip, circular);
905
906	} else {
907		/*
908		 * Attempt to bring the device ONLINE. If the request to
909		 * fails, remove the dev_info node.
910		 */
911		if (ndi_devi_online(cdip, NDI_ONLINE_ATTACH) != NDI_SUCCESS) {
912			(void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE);
913			return (ENXIO);
914		}
915
916		/*
917		 * if the node was successfully added but there was
918		 * no driver available for the device, remove the node
919		 */
920		if (i_ddi_node_state(cdip) < DS_BOUND) {
921			(void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE);
922			return (ENODEV);
923		}
924	}
925
926	/*
927	 * return a handle to the child device
928	 * copy out the name of the newly attached child device if
929	 * the IOCTL request has provided a copyout buffer.
930	 */
931	if (rdip != NULL)
932		*rdip = cdip;
933
934	if (dcp->cpyout_buf == NULL)
935		return (0);
936
937	ASSERT(ddi_node_name(cdip) != NULL);
938	ASSERT(ddi_get_name_addr(cdip) != NULL);
939
940	nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
941	    ddi_node_name(cdip), ddi_get_name_addr(cdip));
942
943	if (copyout(&devnm, dcp->cpyout_buf, nmlen) != 0) {
944		(void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE);
945		return (EFAULT);
946	}
947	return (0);
948}
949
/*
 * Allocate a new child dev_info node under "pdip" from the user's
 * nvlist: the DC_DEVI_NODENAME string supplies the node name and every
 * other nvpair becomes a hardware property on the new node.  On success
 * the node is returned through *rdip; on any failure the node is freed
 * and an errno is returned.  Only int32, string, byte-array, int32-array
 * and string-array nvpairs are accepted.
 */
static int
i_dc_devi_create(struct devctl_iocdata *dcp, dev_info_t *pdip,
    dev_info_t **rdip)
{

	dev_info_t *cdip;
	char *cname = NULL;
	nvlist_t *nvlp = dcp->nvl_user;
	nvpair_t *npp;
	char *np;
	int rv = 0;

	ASSERT(rdip != NULL && *rdip == NULL);

	/* a node name in the nvlist is mandatory */
	if ((nvlp == NULL) ||
	    (nvlist_lookup_string(nvlp, DC_DEVI_NODENAME, &cname) != 0))
		return (EINVAL);

	/*
	 * construct a new dev_info node with a user-provided nodename
	 */
	ndi_devi_alloc_sleep(pdip, cname, (pnode_t)DEVI_SID_NODEID, &cdip);

	/*
	 * create hardware properties for each member in the property
	 * list.  The loop stops early once rv records a failure.
	 */
	for (npp = nvlist_next_nvpair(nvlp, NULL); (npp != NULL && !rv);
	    npp = nvlist_next_nvpair(nvlp, npp)) {

		np = nvpair_name(npp);

		/*
		 * skip the nodename property
		 */
		if (strcmp(np, DC_DEVI_NODENAME) == 0)
			continue;

		switch (nvpair_type(npp)) {

		case DATA_TYPE_INT32: {
			int32_t prop_val;

			if ((rv = nvpair_value_int32(npp, &prop_val)) != 0)
				break;

			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, np,
			    (int)prop_val);
			break;
		}

		case DATA_TYPE_STRING: {
			char *prop_val;

			if ((rv = nvpair_value_string(npp, &prop_val)) != 0)
				break;

			(void) ndi_prop_update_string(DDI_DEV_T_NONE, cdip,
			    np, prop_val);
			break;
		}

		case DATA_TYPE_BYTE_ARRAY: {
			uchar_t *val;
			uint_t nelms;

			if ((rv = nvpair_value_byte_array(npp, &val,
			    &nelms)) != 0)
				break;

			(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
			    cdip, np, (uchar_t *)val, nelms);
			break;
		}

		case DATA_TYPE_INT32_ARRAY: {
			int32_t *val;
			uint_t nelms;

			if ((rv = nvpair_value_int32_array(npp, &val,
			    &nelms)) != 0)
				break;

			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
			    cdip, np, val, nelms);
			break;
		}

		case DATA_TYPE_STRING_ARRAY: {
			char **val;
			uint_t nelms;

			if ((rv = nvpair_value_string_array(npp, &val,
			    &nelms)) != 0)
				break;

			(void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
			    cdip, np, val, nelms);
			break;
		}

		/*
		 * unsupported property data type
		 */
		default:
			rv = EINVAL;
		}
	}

	/*
	 * something above failed
	 * destroy the partially child device and abort the request
	 */
	if (rv != 0) {
		(void) ndi_devi_free(cdip);
		return (rv);
	}

	*rdip = cdip;
	return (0);
}
1071
1072/*
1073 * return current soft bus state of bus nexus "dip"
1074 */
1075int
1076ndi_get_bus_state(dev_info_t *dip, uint_t *rstate)
1077{
1078	if (dip == NULL || rstate == NULL)
1079		return (NDI_FAILURE);
1080
1081	if (DEVI(dip)->devi_ops->devo_bus_ops == NULL)
1082		return (NDI_FAILURE);
1083
1084	mutex_enter(&(DEVI(dip)->devi_lock));
1085	if (DEVI_IS_BUS_QUIESCED(dip))
1086		*rstate = BUS_QUIESCED;
1087	else if (DEVI_IS_BUS_DOWN(dip))
1088		*rstate = BUS_SHUTDOWN;
1089	else
1090		*rstate = BUS_ACTIVE;
1091	mutex_exit(&(DEVI(dip)->devi_lock));
1092	return (NDI_SUCCESS);
1093}
1094
1095/*
1096 * Set the soft state of bus nexus "dip"
1097 */
1098int
1099ndi_set_bus_state(dev_info_t *dip, uint_t state)
1100{
1101	int rv = NDI_SUCCESS;
1102
1103	if (dip == NULL)
1104		return (NDI_FAILURE);
1105
1106	mutex_enter(&(DEVI(dip)->devi_lock));
1107
1108	switch (state) {
1109	case BUS_QUIESCED:
1110		DEVI_SET_BUS_QUIESCE(dip);
1111		break;
1112
1113	case BUS_ACTIVE:
1114		DEVI_SET_BUS_ACTIVE(dip);
1115		DEVI_SET_BUS_UP(dip);
1116		break;
1117
1118	case BUS_SHUTDOWN:
1119		DEVI_SET_BUS_DOWN(dip);
1120		break;
1121
1122	default:
1123		rv = NDI_FAILURE;
1124	}
1125
1126	mutex_exit(&(DEVI(dip)->devi_lock));
1127	return (rv);
1128}
1129
1130/*
1131 * These dummy functions are obsolete and may be removed.
1132 * Retained for existing driver compatibility only.
1133 * Drivers should be fixed not to use these functions.
1134 * Don't write new code using these obsolete interfaces.
1135 */
/* Obsolete no-op kept for driver binary compatibility; lkcnt is ignored. */
/*ARGSUSED*/
void
i_ndi_block_device_tree_changes(uint_t *lkcnt)	/* obsolete */
{
	/* obsolete dummy function */
}
1142
/* Obsolete no-op kept for driver binary compatibility; lkcnt is ignored. */
/*ARGSUSED*/
void
i_ndi_allow_device_tree_changes(uint_t lkcnt)	/* obsolete */
{
	/* obsolete dummy function */
}
1149
1150/*
1151 * Single thread entry into per-driver list
1152 */
1153/*ARGSUSED*/
1154void
1155e_ddi_enter_driver_list(struct devnames *dnp, int *listcnt)	/* obsolete */
1156{
1157	/* obsolete dummy function */
1158}
1159
1160/*
1161 * release the per-driver list
1162 */
1163/*ARGSUSED*/
1164void
1165e_ddi_exit_driver_list(struct devnames *dnp, int listcnt)	/* obsolete */
1166{
1167	/* obsolete dummy function */
1168}
1169
1170/*
1171 * Attempt to enter driver list
1172 */
1173/*ARGSUSED*/
1174int
1175e_ddi_tryenter_driver_list(struct devnames *dnp, int *listcnt)	/* obsolete */
1176{
1177	return (1);	/* obsolete dummy function */
1178}
1179
1180/*
1181 * ndi event handling support functions:
1182 * The NDI event support model is as follows:
1183 *
1184 * The nexus driver defines a set of events using some static structures (so
1185 * these structures can be shared by all instances of the nexus driver).
1186 * The nexus driver allocates an event handle and binds the event set
1187 * to this handle. The nexus driver's event busop functions can just
1188 * call the appropriate NDI event support function using this handle
1189 * as the first argument.
1190 *
1191 * The reasoning for tying events to the device tree is that the entity
1192 * generating the callback will typically be one of the device driver's
1193 * ancestors in the tree.
1194 */
/* Nonzero enables verbose NDI event tracing (patchable at runtime). */
static int ndi_event_debug = 0;

#ifdef DEBUG
#define	NDI_EVENT_DEBUG	ndi_event_debug
#endif /* DEBUG */
1200
1201/*
1202 * allocate a new ndi event handle
1203 */
1204int
1205ndi_event_alloc_hdl(dev_info_t *dip, ddi_iblock_cookie_t cookie,
1206	ndi_event_hdl_t *handle, uint_t flag)
1207{
1208	struct ndi_event_hdl *ndi_event_hdl;
1209
1210	ndi_event_hdl = kmem_zalloc(sizeof (struct ndi_event_hdl),
1211	    ((flag & NDI_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP));
1212
1213	if (!ndi_event_hdl) {
1214		return (NDI_FAILURE);
1215	}
1216
1217	ndi_event_hdl->ndi_evthdl_dip = dip;
1218	ndi_event_hdl->ndi_evthdl_iblock_cookie = cookie;
1219	mutex_init(&ndi_event_hdl->ndi_evthdl_mutex, NULL,
1220	    MUTEX_DRIVER, (void *)cookie);
1221
1222	mutex_init(&ndi_event_hdl->ndi_evthdl_cb_mutex, NULL,
1223	    MUTEX_DRIVER, (void *)cookie);
1224
1225	*handle = (ndi_event_hdl_t)ndi_event_hdl;
1226
1227	return (NDI_SUCCESS);
1228}
1229
1230/*
1231 * free the ndi event handle
1232 */
1233int
1234ndi_event_free_hdl(ndi_event_hdl_t handle)
1235{
1236	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
1237	ndi_event_cookie_t *cookie;
1238	ndi_event_cookie_t *free;
1239
1240	ASSERT(handle);
1241
1242	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1243	mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1244
1245	cookie = ndi_event_hdl->ndi_evthdl_cookie_list;
1246
1247	/* deallocate all defined cookies */
1248	while (cookie != NULL) {
1249		ASSERT(cookie->callback_list == NULL);
1250		free = cookie;
1251		cookie = cookie->next_cookie;
1252
1253		kmem_free(free, sizeof (ndi_event_cookie_t));
1254	}
1255
1256
1257	mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1258	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1259
1260	/* destroy mutexes */
1261	mutex_destroy(&ndi_event_hdl->ndi_evthdl_mutex);
1262	mutex_destroy(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1263
1264	/* free event handle */
1265	kmem_free(ndi_event_hdl, sizeof (struct ndi_event_hdl));
1266
1267	return (NDI_SUCCESS);
1268}
1269
1270
1271/*
1272 * ndi_event_bind_set() adds a set of events to the NDI event
1273 * handle.
1274 *
1275 * Events generated by high level interrupts should not
1276 * be mixed in the same event set with events generated by
1277 * normal interrupts or kernel events.
1278 *
1279 * This function can be called multiple times to bind
1280 * additional sets to the event handle.
1281 * However, events generated by high level interrupts cannot
1282 * be bound to a handle that already has bound events generated
1283 * by normal interrupts or from kernel context and vice versa.
1284 */
1285int
1286ndi_event_bind_set(ndi_event_hdl_t handle,
1287	ndi_event_set_t		*ndi_events,
1288	uint_t			flag)
1289{
1290	struct ndi_event_hdl	*ndi_event_hdl;
1291	ndi_event_cookie_t	*next, *prev, *new_cookie;
1292	uint_t			i, len;
1293	uint_t			dup = 0;
1294	uint_t			high_plevels, other_plevels;
1295	ndi_event_definition_t *ndi_event_defs;
1296
1297	int km_flag = ((flag & NDI_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
1298
1299	ASSERT(handle);
1300	ASSERT(ndi_events);
1301
1302	/*
1303	 * binding must be performed during attach/detach
1304	 */
1305	if (!DEVI_IS_ATTACHING(handle->ndi_evthdl_dip) &&
1306	    !DEVI_IS_DETACHING(handle->ndi_evthdl_dip)) {
1307		cmn_err(CE_WARN, "ndi_event_bind_set must be called within "
1308		    "attach or detach");
1309		return (NDI_FAILURE);
1310	}
1311
1312	/*
1313	 * if it is not the correct version or the event set is
1314	 * empty, bail out
1315	 */
1316	if (ndi_events->ndi_events_version != NDI_EVENTS_REV1)
1317		return (NDI_FAILURE);
1318
1319	ndi_event_hdl	= (struct ndi_event_hdl *)handle;
1320	ndi_event_defs = ndi_events->ndi_event_defs;
1321	high_plevels	= other_plevels = 0;
1322
1323	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1324
1325	/* check for mixing events at high level with the other types */
1326	for (i = 0; i < ndi_events->ndi_n_events; i++) {
1327		if (ndi_event_defs[i].ndi_event_plevel == EPL_HIGHLEVEL) {
1328			high_plevels++;
1329		} else {
1330			other_plevels++;
1331		}
1332	}
1333
1334	/*
1335	 * bail out if high level events are mixed with other types in this
1336	 * event set or the set is incompatible with the set in the handle
1337	 */
1338	if ((high_plevels && other_plevels) ||
1339	    (other_plevels && ndi_event_hdl->ndi_evthdl_high_plevels) ||
1340	    (high_plevels && ndi_event_hdl->ndi_evthdl_other_plevels)) {
1341		mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1342
1343		return (NDI_FAILURE);
1344	}
1345
1346	/*
1347	 * check for duplicate events in both the existing handle
1348	 * and the event set, add events if not duplicates
1349	 */
1350	next = ndi_event_hdl->ndi_evthdl_cookie_list;
1351	for (i = 0; i < ndi_events->ndi_n_events; i++) {
1352		while (next != NULL) {
1353			len = strlen(NDI_EVENT_NAME(next)) + 1;
1354			if (strncmp(NDI_EVENT_NAME(next),
1355			    ndi_event_defs[i].ndi_event_name, len) == 0) {
1356				dup = 1;
1357				break;
1358			}
1359
1360			prev = next;
1361			next = next->next_cookie;
1362		}
1363
1364		if (dup == 0) {
1365			new_cookie = kmem_zalloc(sizeof (ndi_event_cookie_t),
1366			    km_flag);
1367
1368			if (!new_cookie)
1369				return (NDI_FAILURE);
1370
1371			if (ndi_event_hdl->ndi_evthdl_n_events == 0) {
1372				ndi_event_hdl->ndi_evthdl_cookie_list =
1373				    new_cookie;
1374			} else {
1375				prev->next_cookie = new_cookie;
1376			}
1377
1378			ndi_event_hdl->ndi_evthdl_n_events++;
1379
1380			/*
1381			 * set up new cookie
1382			 */
1383			new_cookie->definition = &ndi_event_defs[i];
1384			new_cookie->ddip = ndi_event_hdl->ndi_evthdl_dip;
1385
1386		} else {
1387			/*
1388			 * event not added, must correct plevel numbers
1389			 */
1390			if (ndi_event_defs[i].ndi_event_plevel ==
1391			    EPL_HIGHLEVEL) {
1392				high_plevels--;
1393			} else {
1394				other_plevels--;
1395			}
1396		}
1397
1398		dup = 0;
1399		next = ndi_event_hdl->ndi_evthdl_cookie_list;
1400		prev = NULL;
1401
1402	}
1403
1404	ndi_event_hdl->ndi_evthdl_high_plevels	+= high_plevels;
1405	ndi_event_hdl->ndi_evthdl_other_plevels += other_plevels;
1406
1407	ASSERT((ndi_event_hdl->ndi_evthdl_high_plevels == 0) ||
1408	    (ndi_event_hdl->ndi_evthdl_other_plevels == 0));
1409
1410#ifdef NDI_EVENT_DEBUG
1411	if (ndi_event_debug) {
1412		ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_bind_set");
1413	}
1414#endif /* NDI_EVENT_DEBUG */
1415
1416	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1417
1418	return (NDI_SUCCESS);
1419}
1420
1421/*
1422 * ndi_event_unbind_set() unbinds a set of events, previously
1423 * bound using ndi_event_bind_set(), from the NDI event
1424 * handle.
1425 *
1426 * This routine will unbind all events in the event set.  If an event,
1427 * specified in the event set, is not found in the handle, this
1428 * routine will proceed onto the next member of the set as if the event
1429 * was never specified.
1430 *
1431 * The event set may be a subset of the set of events that
1432 * was previously bound to the handle. For example, events
1433 * can be individually unbound.
1434 *
1435 * An event cannot be unbound if callbacks are still
1436 * registered against the event.
1437 */
1438/*ARGSUSED*/
1439int
1440ndi_event_unbind_set(ndi_event_hdl_t   handle, ndi_event_set_t	*ndi_events,
1441    uint_t flag)
1442{
1443	ndi_event_definition_t	*ndi_event_defs;
1444	int			len;
1445	uint_t			i;
1446	int			rval;
1447	ndi_event_cookie_t *cookie_list;
1448	ndi_event_cookie_t *prev = NULL;
1449
1450	ASSERT(ndi_events);
1451	ASSERT(handle);
1452
1453	/*
1454	 * binding must be performed during attach/detac
1455	 */
1456	if (!DEVI_IS_ATTACHING(handle->ndi_evthdl_dip) &&
1457	    !DEVI_IS_DETACHING(handle->ndi_evthdl_dip)) {
1458		cmn_err(CE_WARN, "ndi_event_bind_set must be called within "
1459		    "attach or detach");
1460		return (NDI_FAILURE);
1461	}
1462
1463	/* bail out if ndi_event_set is outdated */
1464	if (ndi_events->ndi_events_version != NDI_EVENTS_REV1) {
1465		return (NDI_FAILURE);
1466	}
1467
1468	ASSERT(ndi_events->ndi_event_defs);
1469
1470	ndi_event_defs = ndi_events->ndi_event_defs;
1471
1472	mutex_enter(&handle->ndi_evthdl_mutex);
1473	mutex_enter(&handle->ndi_evthdl_cb_mutex);
1474
1475	/*
1476	 * Verify that all events in the event set are eligible
1477	 * for unbinding(ie. there are no outstanding callbacks).
1478	 * If any one of the events are ineligible, fail entire
1479	 * operation.
1480	 */
1481
1482	for (i = 0; i < ndi_events->ndi_n_events; i++) {
1483		cookie_list = handle->ndi_evthdl_cookie_list;
1484		while (cookie_list != NULL) {
1485			len = strlen(NDI_EVENT_NAME(cookie_list)) + 1;
1486			if (strncmp(NDI_EVENT_NAME(cookie_list),
1487			    ndi_event_defs[i].ndi_event_name, len) == 0) {
1488
1489				ASSERT(cookie_list->callback_list == NULL);
1490				if (cookie_list->callback_list) {
1491					rval = NDI_FAILURE;
1492					goto done;
1493				}
1494				break;
1495			} else {
1496				cookie_list = cookie_list->next_cookie;
1497			}
1498		}
1499	}
1500
1501	/*
1502	 * remove all events found within the handle
1503	 * If an event is not found, this function will proceed as if the event
1504	 * was never specified.
1505	 */
1506
1507	for (i = 0; i < ndi_events->ndi_n_events; i++) {
1508		cookie_list = handle->ndi_evthdl_cookie_list;
1509		prev = NULL;
1510		while (cookie_list != NULL) {
1511			len = strlen(NDI_EVENT_NAME(cookie_list)) + 1;
1512			if (strncmp(NDI_EVENT_NAME(cookie_list),
1513			    ndi_event_defs[i].ndi_event_name, len) == 0) {
1514
1515				/*
1516				 * can not unbind an event definition with
1517				 * outstanding callbacks
1518				 */
1519				if (cookie_list->callback_list) {
1520					rval = NDI_FAILURE;
1521					goto done;
1522				}
1523
1524				/* remove this cookie from the list */
1525				if (prev != NULL) {
1526					prev->next_cookie =
1527					    cookie_list->next_cookie;
1528				} else {
1529					handle->ndi_evthdl_cookie_list =
1530					    cookie_list->next_cookie;
1531				}
1532
1533				/* adjust plevel counts */
1534				if (NDI_EVENT_PLEVEL(cookie_list) ==
1535				    EPL_HIGHLEVEL) {
1536					handle->ndi_evthdl_high_plevels--;
1537				} else {
1538					handle->ndi_evthdl_other_plevels--;
1539				}
1540
1541				/* adjust cookie count */
1542				handle->ndi_evthdl_n_events--;
1543
1544				/* free the cookie */
1545				kmem_free(cookie_list,
1546				    sizeof (ndi_event_cookie_t));
1547
1548				cookie_list = handle->ndi_evthdl_cookie_list;
1549				break;
1550
1551			} else {
1552				prev = cookie_list;
1553				cookie_list = cookie_list->next_cookie;
1554			}
1555
1556		}
1557
1558	}
1559
1560#ifdef NDI_EVENT_DEBUG
1561	if (ndi_event_debug) {
1562		ndi_event_dump_hdl(handle, "ndi_event_unbind_set");
1563	}
1564#endif /* NDI_EVENT_DEBUG */
1565
1566	rval = NDI_SUCCESS;
1567
1568done:
1569	mutex_exit(&handle->ndi_evthdl_cb_mutex);
1570	mutex_exit(&handle->ndi_evthdl_mutex);
1571
1572	return (rval);
1573}
1574
1575/*
1576 * ndi_event_retrieve_cookie():
1577 * Return an event cookie for eventname if this nexus driver
1578 * has defined the named event. The event cookie returned
1579 * by this function is used to register callback handlers
1580 * for the event.
1581 *
1582 * ndi_event_retrieve_cookie() is intended to be used in the
1583 * nexus driver's bus_get_eventcookie busop routine.
1584 *
1585 * If the event is not defined by this bus nexus driver, and flag
1586 * does not include NDI_EVENT_NOPASS, then ndi_event_retrieve_cookie()
1587 * will pass the request up the device tree hierarchy by calling
1588 * ndi_busop_get_eventcookie(9N).
1589 * If the event is not defined by this bus nexus driver, and flag
1590 * does include NDI_EVENT_NOPASS, ndi_event_retrieve_cookie()
1591 * will return NDI_FAILURE.  The caller may then determine what further
1592 * action to take, such as using a different handle, passing the
1593 * request up the device tree using ndi_busop_get_eventcookie(9N),
1594 * or returning the failure to the caller, thus blocking the
1595 * progress of the request up the tree.
1596 */
1597int
1598ndi_event_retrieve_cookie(ndi_event_hdl_t handle,
1599	dev_info_t		*rdip,
1600	char			*eventname,
1601	ddi_eventcookie_t	*cookiep,
1602	uint_t			flag)
1603{
1604	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
1605	int		len;
1606	ndi_event_cookie_t *cookie_list;
1607
1608	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1609
1610	cookie_list = ndi_event_hdl->ndi_evthdl_cookie_list;
1611	/*
1612	 * search the cookie list for the event name and return
1613	 * cookie if found.
1614	 */
1615	while (cookie_list != NULL) {
1616
1617		len = strlen(NDI_EVENT_NAME(cookie_list)) + 1;
1618		if (strncmp(NDI_EVENT_NAME(cookie_list), eventname,
1619		    len) == 0) {
1620			*cookiep = (ddi_eventcookie_t)cookie_list;
1621
1622			mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1623			return (NDI_SUCCESS);
1624		}
1625
1626		cookie_list = cookie_list->next_cookie;
1627	}
1628
1629	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1630	/*
1631	 * event was not found, pass up or return failure
1632	 */
1633	if ((flag & NDI_EVENT_NOPASS) == 0) {
1634		return (ndi_busop_get_eventcookie(
1635		    ndi_event_hdl->ndi_evthdl_dip, rdip, eventname, cookiep));
1636	} else {
1637		return (NDI_FAILURE);
1638	}
1639}
1640
1641/*
1642 * check whether this nexus defined this event and look up attributes
1643 */
1644static int
1645ndi_event_is_defined(ndi_event_hdl_t handle,
1646	ddi_eventcookie_t cookie, int *attributes)
1647{
1648
1649	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
1650	ndi_event_cookie_t *cookie_list;
1651
1652	ASSERT(mutex_owned(&handle->ndi_evthdl_mutex));
1653
1654	cookie_list = ndi_event_hdl->ndi_evthdl_cookie_list;
1655	while (cookie_list != NULL) {
1656		if (cookie_list == NDI_EVENT(cookie)) {
1657			if (attributes)
1658				*attributes =
1659				    NDI_EVENT_ATTRIBUTES(cookie_list);
1660
1661			return (NDI_SUCCESS);
1662		}
1663
1664		cookie_list = cookie_list->next_cookie;
1665	}
1666
1667	return (NDI_FAILURE);
1668}
1669
1670/*
1671 * ndi_event_add_callback(): adds an event callback registration
1672 * to the event cookie defining this event.
1673 *
1674 * Refer also to bus_add_eventcall(9n) and ndi_busop_add_eventcall(9n).
1675 *
1676 * ndi_event_add_callback(9n) is intended to be used in
1677 * the nexus driver's bus_add_eventcall(9n) busop function.
1678 *
1679 * If the event is not defined by this bus nexus driver,
1680 * ndi_event_add_callback() will return NDI_FAILURE.
1681 */
1682int
1683ndi_event_add_callback(ndi_event_hdl_t handle, dev_info_t *child_dip,
1684	ddi_eventcookie_t cookie,
1685	void		(*event_callback)(dev_info_t *,
1686			ddi_eventcookie_t, void *arg, void *impldata),
1687	void		*arg,
1688	uint_t		flag,
1689	ddi_callback_id_t *cb_id)
1690{
1691	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
1692	int km_flag = ((flag & NDI_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
1693	ndi_event_callbacks_t *cb;
1694
1695	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1696
1697	/*
1698	 * if the event was not bound to this handle, return failure
1699	 */
1700	if (ndi_event_is_defined(handle, cookie, NULL) != NDI_SUCCESS) {
1701
1702		mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1703		return (NDI_FAILURE);
1704
1705	}
1706
1707	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1708
1709	/*
1710	 * allocate space for a callback structure
1711	 */
1712	cb = kmem_zalloc(sizeof (ndi_event_callbacks_t), km_flag);
1713	if (cb == NULL) {
1714		return (NDI_FAILURE);
1715	}
1716
1717	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1718
1719	/* initialize callback structure */
1720	cb->ndi_evtcb_dip	= child_dip;
1721	cb->ndi_evtcb_callback	= event_callback;
1722	cb->ndi_evtcb_arg	= arg;
1723	cb->ndi_evtcb_cookie	= cookie;
1724	cb->devname		= (char *)ddi_driver_name(child_dip);
1725
1726	*cb_id = (ddi_callback_id_t)cb;
1727	mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1728
1729	/* add this callback structure to the list */
1730	if (NDI_EVENT(cookie)->callback_list) {
1731		cb->ndi_evtcb_next = NDI_EVENT(cookie)->callback_list;
1732		NDI_EVENT(cookie)->callback_list->ndi_evtcb_prev = cb;
1733		NDI_EVENT(cookie)->callback_list = cb;
1734	} else {
1735		NDI_EVENT(cookie)->callback_list = cb;
1736	}
1737#ifdef NDI_EVENT_DEBUG
1738	if (ndi_event_debug) {
1739		ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_add_callback");
1740	}
1741#endif /* NDI_EVENT_DEBUG */
1742
1743	mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1744	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1745
1746	return (NDI_SUCCESS);
1747}
1748
1749/*
1750 * ndi_event_remove_callback():
1751 *
1752 * ndi_event_remove_callback() removes a callback that was
1753 * previously registered using ndi_event_add_callback(9N).
1754 * Refer also to bus_remove_eventcall(9n) and
1755 * ndi_busop_remove_eventcall(9n).
1756 * ndi_event_remove_callback(9n) is intended to be used in
1757 * the nexus driver's bus_remove_eventcall (9n) busop function.
1758 * If the event is not defined by this bus nexus driver,
1759 * ndi_event_remove_callback() will return NDI_FAILURE.
1760 */
1761static void do_ndi_event_remove_callback(struct ndi_event_hdl *ndi_event_hdl,
1762	ddi_callback_id_t cb_id);
1763
1764int
1765ndi_event_remove_callback(ndi_event_hdl_t handle, ddi_callback_id_t cb_id)
1766{
1767	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
1768
1769	ASSERT(cb_id);
1770
1771	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1772	mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1773
1774	do_ndi_event_remove_callback(ndi_event_hdl, cb_id);
1775
1776	mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1777	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1778
1779	return (NDI_SUCCESS);
1780}
1781
1782/*ARGSUSED*/
1783static void
1784do_ndi_event_remove_callback(struct ndi_event_hdl *ndi_event_hdl,
1785    ddi_callback_id_t cb_id)
1786{
1787	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)cb_id;
1788	ASSERT(cb);
1789
1790	ASSERT(mutex_owned(&ndi_event_hdl->ndi_evthdl_mutex));
1791	ASSERT(mutex_owned(&ndi_event_hdl->ndi_evthdl_cb_mutex));
1792
1793	/* remove from callback linked list */
1794	if (cb->ndi_evtcb_prev) {
1795		cb->ndi_evtcb_prev->ndi_evtcb_next = cb->ndi_evtcb_next;
1796	}
1797
1798	if (cb->ndi_evtcb_next) {
1799		cb->ndi_evtcb_next->ndi_evtcb_prev = cb->ndi_evtcb_prev;
1800	}
1801
1802	if (NDI_EVENT(cb->ndi_evtcb_cookie)->callback_list == cb) {
1803		NDI_EVENT(cb->ndi_evtcb_cookie)->callback_list =
1804		    cb->ndi_evtcb_next;
1805	}
1806
1807	kmem_free(cb, sizeof (ndi_event_callbacks_t));
1808}
1809
1810/*
1811 * ndi_event_run_callbacks() performs event callbacks for the event
1812 * specified by cookie, if this is among those bound to the
1813 * supplied handle.
1814 * If the event is among those bound to the handle, none,
1815 * some, or all of the handlers registered for the event
1816 * will be called, according to the delivery attributes of
1817 * the event.
1818 * If the event attributes include NDI_EVENT_POST_TO_ALL
1819 * (the default), all the handlers for the event will be
1820 * called in an unspecified order.
1821 * If the event attributes include NDI_EVENT_POST_TO_TGT, only
1822 * the handlers (if any) registered by the driver identified by
1823 * rdip will be called.
1824 * If the event identified by cookie is not bound to the handle,
1825 * NDI_FAILURE will be returned.
1826 */
int
ndi_event_run_callbacks(ndi_event_hdl_t handle, dev_info_t *child_dip,
	ddi_eventcookie_t cookie, void *bus_impldata)
{
	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
	ndi_event_callbacks_t *next, *cb;
	int attributes;

	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);

	/* if this is not our event, fail */
	if (ndi_event_is_defined(handle, cookie, &attributes) !=
	    NDI_SUCCESS) {

		mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
		return (NDI_FAILURE);
	}

	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);

#ifdef NDI_EVENT_DEBUG
	if (ndi_event_debug) {
		cmn_err(CE_CONT, "ndi_event_run_callbacks:\n\t"
		    "producer dip=%p (%s%d): cookie = %p, name = %s\n",
		    (void *)ndi_event_hdl->ndi_evthdl_dip,
		    ddi_node_name(ndi_event_hdl->ndi_evthdl_dip),
		    ddi_get_instance(ndi_event_hdl->ndi_evthdl_dip),
		    (void *)cookie,
		    ndi_event_cookie_to_name(handle, cookie));
	}
#endif /* #ifdef NDI_EVENT_DEBUG */


	/*
	 * The callback handlers may call conversion functions.  The conversion
	 * functions may hold the ndi_evthdl_mutex during execution.  Thus, to
	 * avoid a recursive mutex problem, only the ndi_evthdl_cb_mutex is
	 * held.  The ndi_evthdl_mutex is not held when running the callbacks.
	 */
	mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex);

	/* perform callbacks */
	next = NDI_EVENT(cookie)->callback_list;
	while (next != NULL) {

		/*
		 * advance "next" before invoking the handler —
		 * presumably so a handler may remove its own
		 * registration without breaking the walk (verify)
		 */
		cb = next;
		next = next->ndi_evtcb_next;

		ASSERT(cb->ndi_evtcb_cookie == cookie);

		/* POST_TO_TGT delivery: skip registrants other than the target */
		if (attributes == NDI_EVENT_POST_TO_TGT &&
		    child_dip != cb->ndi_evtcb_dip) {
			continue;
		}

		cb->ndi_evtcb_callback(cb->ndi_evtcb_dip, cb->ndi_evtcb_cookie,
		    cb->ndi_evtcb_arg, bus_impldata);

#ifdef NDI_EVENT_DEBUG
		if (ndi_event_debug) {
			cmn_err(CE_CONT,
			    "\t\tconsumer dip=%p (%s%d)\n",
			    (void *)cb->ndi_evtcb_dip,
			    ddi_node_name(cb->ndi_evtcb_dip),
			    ddi_get_instance(cb->ndi_evtcb_dip));
		}
#endif

	}

	mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex);

#ifdef NDI_EVENT_DEBUG
	if (ndi_event_debug) {
		mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
		ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_run_callbacks");
		mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
	}
#endif /* NDI_EVENT_DEBUG */

	return (NDI_SUCCESS);
}
1909
1910
1911/*
1912 * perform one callback for a specified cookie and just one target
1913 */
1914int
1915ndi_event_do_callback(ndi_event_hdl_t handle, dev_info_t *child_dip,
1916	ddi_eventcookie_t cookie, void *bus_impldata)
1917{
1918	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
1919	ndi_event_callbacks_t *next, *cb;
1920	int attributes;
1921
1922	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1923
1924	/* if this is not our event, fail */
1925	if (ndi_event_is_defined(handle, cookie, &attributes) !=
1926	    NDI_SUCCESS) {
1927
1928		mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1929
1930		return (NDI_FAILURE);
1931	}
1932
1933	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1934
1935#ifdef NDI_EVENT_DEBUG
1936	if (ndi_event_debug) {
1937		cmn_err(CE_CONT, "ndi_event_run_callbacks:\n\t"
1938		    "producer dip=%p (%s%d): cookie = %p, name = %s\n",
1939		    (void *)ndi_event_hdl->ndi_evthdl_dip,
1940		    ddi_node_name(ndi_event_hdl->ndi_evthdl_dip),
1941		    ddi_get_instance(ndi_event_hdl->ndi_evthdl_dip),
1942		    (void *)cookie,
1943		    ndi_event_cookie_to_name(handle, cookie));
1944	}
1945#endif
1946
1947
1948	/*
1949	 * we only grab the cb mutex because the callback handlers
1950	 * may call the conversion functions which would cause a recursive
1951	 * mutex problem
1952	 */
1953	mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1954
1955	/* perform callbacks */
1956	for (next = NDI_EVENT(cookie)->callback_list; next != NULL; ) {
1957		cb = next;
1958		next = next->ndi_evtcb_next;
1959
1960		if (cb->ndi_evtcb_dip == child_dip) {
1961			cb->ndi_evtcb_callback(cb->ndi_evtcb_dip,
1962			    cb->ndi_evtcb_cookie, cb->ndi_evtcb_arg,
1963			    bus_impldata);
1964
1965#ifdef NDI_EVENT_DEBUG
1966			if (ndi_event_debug) {
1967				cmn_err(CE_CONT,
1968				    "\t\tconsumer dip=%p (%s%d)\n",
1969				    (void *)cb->ndi_evtcb_dip,
1970				    ddi_node_name(cb->ndi_evtcb_dip),
1971				    ddi_get_instance(cb->ndi_evtcb_dip));
1972			}
1973#endif
1974			break;
1975		}
1976	}
1977
1978	mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex);
1979
1980#ifdef NDI_EVENT_DEBUG
1981	if (ndi_event_debug) {
1982		mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
1983		ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_run_callbacks");
1984		mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
1985	}
1986#endif /* NDI_EVENT_DEBUG */
1987
1988	return (NDI_SUCCESS);
1989}
1990
1991
1992/*
1993 * ndi_event_tag_to_cookie: utility function to find an event cookie
1994 * given an event tag
1995 */
1996ddi_eventcookie_t
1997ndi_event_tag_to_cookie(ndi_event_hdl_t handle, int event_tag)
1998{
1999	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
2000	ndi_event_cookie_t *list;
2001
2002	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
2003
2004	list = ndi_event_hdl->ndi_evthdl_cookie_list;
2005	while (list != NULL) {
2006		if (NDI_EVENT_TAG(list) == event_tag) {
2007			mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2008			return ((ddi_eventcookie_t)list);
2009		}
2010
2011		list = list->next_cookie;
2012	}
2013
2014	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2015	return (NULL);
2016}
2017
2018/*
2019 * ndi_event_cookie_to_tag: utility function to find a event tag
2020 * given an event_cookie
2021 */
2022int
2023ndi_event_cookie_to_tag(ndi_event_hdl_t handle, ddi_eventcookie_t cookie)
2024{
2025	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
2026	ndi_event_cookie_t *list;
2027
2028	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
2029
2030	list = ndi_event_hdl->ndi_evthdl_cookie_list;
2031
2032	while (list != NULL) {
2033		if ((ddi_eventcookie_t)list == cookie) {
2034			mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2035			return (NDI_EVENT_TAG(list));
2036		}
2037
2038		list = list->next_cookie;
2039	}
2040
2041	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2042	return (NDI_FAILURE);
2043
2044}
2045
2046/*
2047 * ndi_event_cookie_to_name: utility function to find an event name
2048 * given an event_cookie
2049 */
2050char *
2051ndi_event_cookie_to_name(ndi_event_hdl_t handle, ddi_eventcookie_t cookie)
2052{
2053	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
2054	ndi_event_cookie_t *list;
2055
2056	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
2057
2058	list = ndi_event_hdl->ndi_evthdl_cookie_list;
2059
2060	while (list != NULL) {
2061		if (list == NDI_EVENT(cookie)) {
2062			mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2063			return (NDI_EVENT_NAME(list));
2064		}
2065
2066		list = list->next_cookie;
2067	}
2068
2069	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2070	return (NULL);
2071}
2072
2073/*
2074 * ndi_event_tag_to_name: utility function to find an event name
2075 * given an event tag
2076 */
2077char *
2078ndi_event_tag_to_name(ndi_event_hdl_t handle, int event_tag)
2079{
2080	struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle;
2081	ndi_event_cookie_t *list;
2082
2083	mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex);
2084
2085	list = ndi_event_hdl->ndi_evthdl_cookie_list;
2086
2087	while (list) {
2088		if (NDI_EVENT_TAG(list) == event_tag) {
2089			mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2090			return (NDI_EVENT_NAME(list));
2091		}
2092
2093		list = list->next_cookie;
2094	}
2095
2096	mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex);
2097
2098	return (NULL);
2099}
2100
2101#ifdef NDI_EVENT_DEBUG
/*
 * Debug-only: dump an event handle's identity, plevel counters, and
 * every bound cookie with its registered callbacks to the console.
 * Caller must hold the handle mutex (asserted below).
 */
void
ndi_event_dump_hdl(struct ndi_event_hdl *hdl, char *location)
{


	ndi_event_callbacks_t *next;
	ndi_event_cookie_t *list;

	ASSERT(mutex_owned(&hdl->ndi_evthdl_mutex));
	list = hdl->ndi_evthdl_cookie_list;

	/* handle identity: producer dip, name and instance */
	cmn_err(CE_CONT, "%s: event handle (%p): dip = %p (%s%d)\n",
	    location, (void *)hdl, (void *)hdl->ndi_evthdl_dip,
	    ddi_node_name(hdl->ndi_evthdl_dip),
	    ddi_get_instance(hdl->ndi_evthdl_dip));
	cmn_err(CE_CONT, "\thigh=%d other=%d n=%d\n",
	    hdl->ndi_evthdl_high_plevels, hdl->ndi_evthdl_other_plevels,
	    hdl->ndi_evthdl_n_events);

	cmn_err(CE_CONT, "\tevent cookies:\n");
	while (list) {
		cmn_err(CE_CONT, "\t\ttag=%d name=%s p=%d a=%x dd=%p\n",
		    NDI_EVENT_TAG(list), NDI_EVENT_NAME(list),
		    NDI_EVENT_PLEVEL(list), NDI_EVENT_ATTRIBUTES(list),
		    (void *)NDI_EVENT_DDIP(list));
		cmn_err(CE_CONT, "\t\tcallbacks:\n");
		/* each cookie carries its own registered-callback list */
		for (next = list->callback_list; next != NULL;
		    next = next->ndi_evtcb_next) {
			cmn_err(CE_CONT,
			    "\t\t  dip=%p (%s%d) cookie=%p arg=%p\n",
			    (void*)next->ndi_evtcb_dip,
			    ddi_driver_name(next->ndi_evtcb_dip),
			    ddi_get_instance(next->ndi_evtcb_dip),
			    (void *)next->ndi_evtcb_cookie,
			    next->ndi_evtcb_arg);
		}

		list = list->next_cookie;
	}

	cmn_err(CE_CONT, "\n");
}
2144#endif
2145
int
ndi_dev_is_prom_node(dev_info_t *dip)
{
	/* nonzero if this node's class is DDI_NC_PROM */
	return (DEVI(dip)->devi_node_class == DDI_NC_PROM);
}
2151
int
ndi_dev_is_pseudo_node(dev_info_t *dip)
{
	/*
	 * NOTE: this does NOT mean the pseudo branch of the device tree,
	 * it means the node was created by software (DEVI_SID_NODEID ||
	 * DEVI_PSEUDO_NODEID || DEVI_SID_HIDDEN_NODEID) instead of being
	 * generated from a PROM node.
	 */
	return (DEVI(dip)->devi_node_class == DDI_NC_PSEUDO);
}
2163
int
ndi_dev_is_persistent_node(dev_info_t *dip)
{
	/* nonzero if the DDI_PERSISTENT attribute flag is set */
	return ((DEVI(dip)->devi_node_attributes & DDI_PERSISTENT) != 0);
}
2169
int
ndi_dev_is_hidden_node(dev_info_t *dip)
{
	/* nonzero if the DDI_HIDDEN_NODE attribute flag is set */
	return ((DEVI(dip)->devi_node_attributes & DDI_HIDDEN_NODE) != 0);
}
2175
int
ndi_dev_is_hotplug_node(dev_info_t *dip)
{
	/* nonzero if the DDI_HOTPLUG_NODE attribute flag is set */
	return ((DEVI(dip)->devi_node_attributes & DDI_HOTPLUG_NODE) != 0);
}
2181
void
ndi_devi_set_hidden(dev_info_t *dip)
{
	/* set the DDI_HIDDEN_NODE attribute flag on this node */
	DEVI(dip)->devi_node_attributes |= DDI_HIDDEN_NODE;
}
2187
void
ndi_devi_clr_hidden(dev_info_t *dip)
{
	/* clear the DDI_HIDDEN_NODE attribute flag on this node */
	DEVI(dip)->devi_node_attributes &= ~DDI_HIDDEN_NODE;
}
2193
int
i_ndi_dev_is_auto_assigned_node(dev_info_t *dip)
{
	/* nonzero if the nodeid was auto-assigned rather than firmware-given */
	return ((DEVI(dip)->devi_node_attributes &
	    DDI_AUTO_ASSIGNED_NODEID) != 0);
}
2200
void
i_ndi_set_node_class(dev_info_t *dip, ddi_node_class_t c)
{
	/* setter for the node class (e.g. DDI_NC_PROM / DDI_NC_PSEUDO) */
	DEVI(dip)->devi_node_class = c;
}
2206
ddi_node_class_t
i_ndi_get_node_class(dev_info_t *dip)
{
	/* getter for the node class */
	return (DEVI(dip)->devi_node_class);
}
2212
void
i_ndi_set_node_attributes(dev_info_t *dip, int p)
{
	/* replace (not OR-in) the node's attribute flags wholesale */
	DEVI(dip)->devi_node_attributes = p;
}
2218
int
i_ndi_get_node_attributes(dev_info_t *dip)
{
	/* getter for the node's attribute flags */
	return (DEVI(dip)->devi_node_attributes);
}
2224
void
i_ndi_set_nodeid(dev_info_t *dip, int n)
{
	/* setter for the node's nodeid */
	DEVI(dip)->devi_nodeid = n;
}
2230
void
ndi_set_acc_fault(ddi_acc_handle_t ah)
{
	/* thin public wrapper over the internal access-handle fault setter */
	i_ddi_acc_set_fault(ah);
}
2236
void
ndi_clr_acc_fault(ddi_acc_handle_t ah)
{
	/* thin public wrapper over the internal access-handle fault clearer */
	i_ddi_acc_clr_fault(ah);
}
2242
void
ndi_set_dma_fault(ddi_dma_handle_t dh)
{
	/* thin public wrapper over the internal DMA-handle fault setter */
	i_ddi_dma_set_fault(dh);
}
2248
void
ndi_clr_dma_fault(ddi_dma_handle_t dh)
{
	/* thin public wrapper over the internal DMA-handle fault clearer */
	i_ddi_dma_clr_fault(dh);
}
2254
2255/*
2256 *  The default fault-handler, called when the event posted by
2257 *  ddi_dev_report_fault() reaches rootnex.
2258 */
2259static void
2260i_ddi_fault_handler(dev_info_t *dip, struct ddi_fault_event_data *fedp)
2261{
2262	ASSERT(fedp);
2263
2264	mutex_enter(&(DEVI(dip)->devi_lock));
2265	if (!DEVI_IS_DEVICE_OFFLINE(dip)) {
2266		switch (fedp->f_impact) {
2267		case DDI_SERVICE_LOST:
2268			DEVI_SET_DEVICE_DOWN(dip);
2269			break;
2270
2271		case DDI_SERVICE_DEGRADED:
2272			DEVI_SET_DEVICE_DEGRADED(dip);
2273			break;
2274
2275		case DDI_SERVICE_UNAFFECTED:
2276		default:
2277			break;
2278
2279		case DDI_SERVICE_RESTORED:
2280			DEVI_SET_DEVICE_UP(dip);
2281			break;
2282		}
2283	}
2284	mutex_exit(&(DEVI(dip)->devi_lock));
2285}
2286
2287/*
2288 * The default fault-logger, called when the event posted by
2289 * ddi_dev_report_fault() reaches rootnex.
2290 */
2291/*ARGSUSED*/
2292static void
2293i_ddi_fault_logger(dev_info_t *rdip, struct ddi_fault_event_data *fedp)
2294{
2295	ddi_devstate_t newstate;
2296	const char *action;
2297	const char *servstate;
2298	const char *location;
2299	int bad;
2300	int changed;
2301	int level;
2302	int still;
2303
2304	ASSERT(fedp);
2305
2306	bad = 0;
2307	switch (fedp->f_location) {
2308	case DDI_DATAPATH_FAULT:
2309		location = "in datapath to";
2310		break;
2311	case DDI_DEVICE_FAULT:
2312		location = "in";
2313		break;
2314	case DDI_EXTERNAL_FAULT:
2315		location = "external to";
2316		break;
2317	default:
2318		location = "somewhere near";
2319		bad = 1;
2320		break;
2321	}
2322
2323	newstate = ddi_get_devstate(fedp->f_dip);
2324	switch (newstate) {
2325	case DDI_DEVSTATE_OFFLINE:
2326		servstate = "unavailable";
2327		break;
2328	case DDI_DEVSTATE_DOWN:
2329		servstate = "unavailable";
2330		break;
2331	case DDI_DEVSTATE_QUIESCED:
2332		servstate = "suspended";
2333		break;
2334	case DDI_DEVSTATE_DEGRADED:
2335		servstate = "degraded";
2336		break;
2337	default:
2338		servstate = "available";
2339		break;
2340	}
2341
2342	changed = (newstate != fedp->f_oldstate);
2343	level = (newstate < fedp->f_oldstate) ? CE_WARN : CE_NOTE;
2344	switch (fedp->f_impact) {
2345	case DDI_SERVICE_LOST:
2346	case DDI_SERVICE_DEGRADED:
2347	case DDI_SERVICE_UNAFFECTED:
2348		/* fault detected; service [still] <servstate> */
2349		action = "fault detected";
2350		still = !changed;
2351		break;
2352
2353	case DDI_SERVICE_RESTORED:
2354		if (newstate != DDI_DEVSTATE_UP) {
2355			/* fault cleared; service still <servstate> */
2356			action = "fault cleared";
2357			still = 1;
2358		} else if (changed) {
2359			/* fault cleared; service <servstate> */
2360			action = "fault cleared";
2361			still = 0;
2362		} else {
2363			/* no fault; service <servstate> */
2364			action = "no fault";
2365			still = 0;
2366		}
2367		break;
2368
2369	default:
2370		bad = 1;
2371		break;
2372	}
2373
2374	cmn_err(level, "!%s%d: %s %s device; service %s%s"+(bad|changed),
2375	    ddi_driver_name(fedp->f_dip), ddi_get_instance(fedp->f_dip),
2376	    bad ? "invalid report of fault" : action,
2377	    location, still ? "still " : "", servstate);
2378
2379	cmn_err(level, "!%s%d: %s"+(bad|changed),
2380	    ddi_driver_name(fedp->f_dip), ddi_get_instance(fedp->f_dip),
2381	    fedp->f_message);
2382}
2383
2384/*
2385 * Platform-settable pointers to fault handler and logger functions.
2386 * These are called by the default rootnex event-posting code when
2387 * a fault event reaches rootnex.
2388 */
2389void (*plat_fault_handler)(dev_info_t *, struct ddi_fault_event_data *) =
2390	i_ddi_fault_handler;
2391void (*plat_fault_logger)(dev_info_t *, struct ddi_fault_event_data *) =
2392	i_ddi_fault_logger;
2393
2394/*
2395 * Rootnex event definitions ...
2396 */
2397enum rootnex_event_tags {
2398	ROOTNEX_FAULT_EVENT
2399};
2400static ndi_event_hdl_t rootnex_event_hdl;
2401static ndi_event_definition_t rootnex_event_set[] = {
2402	{
2403		ROOTNEX_FAULT_EVENT,
2404		DDI_DEVI_FAULT_EVENT,
2405		EPL_INTERRUPT,
2406		NDI_EVENT_POST_TO_ALL
2407	}
2408};
2409static ndi_event_set_t rootnex_events = {
2410	NDI_EVENTS_REV1,
2411	sizeof (rootnex_event_set) / sizeof (rootnex_event_set[0]),
2412	rootnex_event_set
2413};
2414
2415/*
2416 * Initialize rootnex event handle
2417 */
2418void
2419i_ddi_rootnex_init_events(dev_info_t *dip)
2420{
2421	if (ndi_event_alloc_hdl(dip, (ddi_iblock_cookie_t)(LOCK_LEVEL-1),
2422	    &rootnex_event_hdl, NDI_SLEEP) == NDI_SUCCESS) {
2423		if (ndi_event_bind_set(rootnex_event_hdl,
2424		    &rootnex_events, NDI_SLEEP) != NDI_SUCCESS) {
2425			(void) ndi_event_free_hdl(rootnex_event_hdl);
2426			rootnex_event_hdl = NULL;
2427		}
2428	}
2429}
2430
2431/*
2432 *      Event-handling functions for rootnex
2433 *      These provide the standard implementation of fault handling
2434 */
2435/*ARGSUSED*/
2436int
2437i_ddi_rootnex_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
2438	char *eventname, ddi_eventcookie_t *cookiep)
2439{
2440	if (rootnex_event_hdl == NULL)
2441		return (NDI_FAILURE);
2442	return (ndi_event_retrieve_cookie(rootnex_event_hdl, rdip, eventname,
2443	    cookiep, NDI_EVENT_NOPASS));
2444}
2445
2446/*ARGSUSED*/
2447int
2448i_ddi_rootnex_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
2449	ddi_eventcookie_t eventid, void (*handler)(dev_info_t *dip,
2450	ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
2451	ddi_callback_id_t *cb_id)
2452{
2453	if (rootnex_event_hdl == NULL)
2454		return (NDI_FAILURE);
2455	return (ndi_event_add_callback(rootnex_event_hdl, rdip,
2456	    eventid, handler, arg, NDI_SLEEP, cb_id));
2457}
2458
2459/*ARGSUSED*/
2460int
2461i_ddi_rootnex_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
2462{
2463	if (rootnex_event_hdl == NULL)
2464		return (NDI_FAILURE);
2465
2466	return (ndi_event_remove_callback(rootnex_event_hdl, cb_id));
2467}
2468
2469/*ARGSUSED*/
2470int
2471i_ddi_rootnex_post_event(dev_info_t *dip, dev_info_t *rdip,
2472	ddi_eventcookie_t eventid, void *impl_data)
2473{
2474	int tag;
2475
2476	if (rootnex_event_hdl == NULL)
2477		return (NDI_FAILURE);
2478
2479	tag = ndi_event_cookie_to_tag(rootnex_event_hdl, eventid);
2480	if (tag == ROOTNEX_FAULT_EVENT) {
2481		(*plat_fault_handler)(rdip, impl_data);
2482		(*plat_fault_logger)(rdip, impl_data);
2483	}
2484	return (ndi_event_run_callbacks(rootnex_event_hdl, rdip,
2485	    eventid, impl_data));
2486}
2487
2488/*
2489 * ndi_set_bus_private/ndi_get_bus_private:
2490 * Get/set device bus private data in devinfo.
2491 */
2492void
2493ndi_set_bus_private(dev_info_t *dip, boolean_t up, uint32_t port_type,
2494    void *data)
2495{
2496	if (up) {
2497		DEVI(dip)->devi_bus.port_up.info.port.type = port_type;
2498		DEVI(dip)->devi_bus.port_up.priv_p = data;
2499	} else {
2500		DEVI(dip)->devi_bus.port_down.info.port.type = port_type;
2501		DEVI(dip)->devi_bus.port_down.priv_p = data;
2502	}
2503}
2504
2505void *
2506ndi_get_bus_private(dev_info_t *dip, boolean_t up)
2507{
2508	if (up)
2509		return (DEVI(dip)->devi_bus.port_up.priv_p);
2510	else
2511		return (DEVI(dip)->devi_bus.port_down.priv_p);
2512}
2513
2514boolean_t
2515ndi_port_type(dev_info_t *dip, boolean_t up, uint32_t port_type)
2516{
2517	if (up) {
2518		return ((DEVI(dip)->devi_bus.port_up.info.port.type) ==
2519		    port_type);
2520	} else {
2521		return ((DEVI(dip)->devi_bus.port_down.info.port.type) ==
2522		    port_type);
2523	}
2524}
2525
2526/* Interfaces for 'self' to set/get a child's flavor */
2527void
2528ndi_flavor_set(dev_info_t *child, ndi_flavor_t child_flavor)
2529{
2530	DEVI(child)->devi_flavor = child_flavor;
2531}
2532
2533ndi_flavor_t
2534ndi_flavor_get(dev_info_t *child)
2535{
2536	return (DEVI(child)->devi_flavor);
2537}
2538
2539/*
2540 * Interfaces to maintain flavor-specific private data of flavored
2541 * children of self.
2542 *
2543 * The flavor count always includes the default (0) vanilla flavor,
2544 * but storage for the vanilla flavor data pointer is in the same
2545 * place that ddi_[sg]et_driver_private uses, so the flavorv
2546 * storage is just for flavors 1..{nflavors-1}.
2547 */
2548void
2549ndi_flavorv_alloc(dev_info_t *self, int nflavors)
2550{
2551	ASSERT(nflavors > 0 && (DEVI(self)->devi_flavorv == NULL ||
2552	    nflavors == DEVI(self)->devi_flavorv_n));
2553	if (nflavors <= 1 || (DEVI(self)->devi_flavorv)) {
2554		return;
2555	}
2556	DEVI(self)->devi_flavorv =
2557	    kmem_zalloc((nflavors - 1) * sizeof (void *), KM_SLEEP);
2558	DEVI(self)->devi_flavorv_n = nflavors;
2559}
2560
2561void
2562ndi_flavorv_set(dev_info_t *self, ndi_flavor_t child_flavor, void *v)
2563{
2564	if (child_flavor == NDI_FLAVOR_VANILLA) {
2565		ddi_set_driver_private(self, v);
2566	} else {
2567		ASSERT(child_flavor < DEVI(self)->devi_flavorv_n &&
2568		    DEVI(self)->devi_flavorv != NULL);
2569		if (child_flavor > DEVI(self)->devi_flavorv_n ||
2570		    DEVI(self)->devi_flavorv == NULL) {
2571			return;
2572		}
2573		DEVI(self)->devi_flavorv[child_flavor - 1] = v;
2574	}
2575}
2576
2577void	*
2578ndi_flavorv_get(dev_info_t *self, ndi_flavor_t child_flavor)
2579{
2580	if (child_flavor == NDI_FLAVOR_VANILLA) {
2581		return (ddi_get_driver_private(self));
2582	} else {
2583		ASSERT(child_flavor < DEVI(self)->devi_flavorv_n &&
2584		    DEVI(self)->devi_flavorv != NULL);
2585		if (child_flavor > DEVI(self)->devi_flavorv_n ||
2586		    DEVI(self)->devi_flavorv == NULL) {
2587			return (NULL);
2588		}
2589		return (DEVI(self)->devi_flavorv[child_flavor - 1]);
2590	}
2591}
2592