1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Logical domain channel devices are devices implemented entirely
30  * in software; cnex is the nexus for channel-devices. They use
31  * the HV channel interfaces via the LDC transport module to send
32  * and receive data and to register callbacks.
33  */
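
/*
 * Rough flow (see cnex_attach() below): cnex fills in an ldc_cnex_t with
 * its channel and interrupt entry points (cnex_reg_chan, cnex_unreg_chan,
 * cnex_add_intr, cnex_rem_intr and cnex_clr_intr) and hands it to the LDC
 * module via ldc_register(). The LDC module then calls back into these
 * routines as its clients bring up channels and register their interrupt
 * callbacks.
 */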
34 
35 #include <sys/types.h>
36 #include <sys/cmn_err.h>
37 #include <sys/conf.h>
38 #include <sys/ddi.h>
39 #include <sys/ddi_impldefs.h>
40 #include <sys/devops.h>
41 #include <sys/instance.h>
42 #include <sys/modctl.h>
43 #include <sys/open.h>
44 #include <sys/stat.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/systm.h>
48 #include <sys/mkdev.h>
49 #include <sys/machsystm.h>
50 #include <sys/intr.h>
51 #include <sys/ddi_intr_impl.h>
52 #include <sys/ivintr.h>
53 #include <sys/hypervisor_api.h>
54 #include <sys/ldc.h>
55 #include <sys/cnex.h>
56 #include <sys/mach_descrip.h>
57 
58 /*
59  * Internal functions/information
60  */
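
/*
 * Map of LDC device classes to the interrupt priority level (PIL) at
 * which their channel interrupt handlers run; cnex_add_intr() consults
 * this table when wiring up a channel's Tx/Rx interrupts.
 */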
61 static struct cnex_pil_map cnex_class_to_pil[] = {
62 	{LDC_DEV_GENERIC,	PIL_3},
63 	{LDC_DEV_BLK,		PIL_4},
64 	{LDC_DEV_BLK_SVC,	PIL_3},
65 	{LDC_DEV_NT,		PIL_6},
66 	{LDC_DEV_NT_SVC,	PIL_4},
67 	{LDC_DEV_SERIAL,	PIL_6}
68 };
69 #define	CNEX_MAX_DEVS (sizeof (cnex_class_to_pil) / \
70 				sizeof (cnex_class_to_pil[0]))
71 
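/*
 * Extract the sun4v configuration handle (used for the hypervisor
 * interrupt calls) from the nexus node's "reg" address in cnex_attach():
 * shift out the low 32 bits and mask off the top nibble of what remains.
 * For illustration only (hypothetical value), a physaddr of
 * 0xf020000000000000 would yield a cfghdl of 0x200000.
 */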
72 #define	SUN4V_REG_SPEC2CFG_HDL(x)	(((x) >> 32) & ~(0xfull << 28))
73 
74 static hrtime_t cnex_pending_tmout = 2ull * NANOSEC; /* 2 secs in nsecs */
75 static void *cnex_state;
76 
77 static void cnex_intr_redist(void *arg);
78 static uint_t cnex_intr_wrapper(caddr_t arg);
79 
80 /*
81  * Debug info
82  */
83 #ifdef DEBUG
84 
85 /*
86  * Print debug messages
87  *
88  * Set cnexdbg to 0xf to enable all messages
89  * 0x8 - Errors
90  * 0x4 - Warnings
91  * 0x2 - All debug messages
92  * 0x1 - Minimal debug messages
93  */
94 
95 int cnexdbg = 0x8;
96 
97 static void
98 cnexdebug(const char *fmt, ...)
99 {
100 	char buf[512];
101 	va_list ap;
102 
103 	va_start(ap, fmt);
104 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
105 	va_end(ap);
106 
107 	cmn_err(CE_CONT, "%s\n", buf);
108 }
109 
110 #define	D1		\
111 if (cnexdbg & 0x01)	\
112 	cnexdebug
113 
114 #define	D2		\
115 if (cnexdbg & 0x02)	\
116 	cnexdebug
117 
118 #define	DWARN		\
119 if (cnexdbg & 0x04)	\
120 	cnexdebug
121 
122 #define	DERR		\
123 if (cnexdbg & 0x08)	\
124 	cnexdebug
125 
126 #else
127 
128 #define	D1
129 #define	D2
130 #define	DWARN
131 #define	DERR
132 
133 #endif
134 
135 /*
136  * Config information
137  */
138 static int cnex_attach(dev_info_t *, ddi_attach_cmd_t);
139 static int cnex_detach(dev_info_t *, ddi_detach_cmd_t);
140 static int cnex_open(dev_t *, int, int, cred_t *);
141 static int cnex_close(dev_t, int, int, cred_t *);
142 static int cnex_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
143 static int cnex_ctl(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
144     void *);
145 
146 static struct bus_ops cnex_bus_ops = {
147 	BUSO_REV,
148 	nullbusmap,		/* bus_map */
149 	NULL,			/* bus_get_intrspec */
150 	NULL,			/* bus_add_intrspec */
151 	NULL,			/* bus_remove_intrspec */
152 	i_ddi_map_fault,	/* bus_map_fault */
153 	ddi_no_dma_map,		/* bus_dma_map */
154 	ddi_no_dma_allochdl,	/* bus_dma_allochdl */
155 	NULL,			/* bus_dma_freehdl */
156 	NULL,			/* bus_dma_bindhdl */
157 	NULL,			/* bus_dma_unbindhdl */
158 	NULL,			/* bus_dma_flush */
159 	NULL,			/* bus_dma_win */
160 	NULL,			/* bus_dma_ctl */
161 	cnex_ctl,		/* bus_ctl */
162 	ddi_bus_prop_op,	/* bus_prop_op */
163 	0,			/* bus_get_eventcookie */
164 	0,			/* bus_add_eventcall */
165 	0,			/* bus_remove_eventcall	*/
166 	0,			/* bus_post_event */
167 	NULL,			/* bus_intr_ctl */
168 	NULL,			/* bus_config */
169 	NULL,			/* bus_unconfig */
170 	NULL,			/* bus_fm_init */
171 	NULL,			/* bus_fm_fini */
172 	NULL,			/* bus_fm_access_enter */
173 	NULL,			/* bus_fm_access_exit */
174 	NULL,			/* bus_power */
175 	NULL			/* bus_intr_op */
176 };
177 
178 static struct cb_ops cnex_cb_ops = {
179 	cnex_open,			/* open */
180 	cnex_close,			/* close */
181 	nodev,				/* strategy */
182 	nodev,				/* print */
183 	nodev,				/* dump */
184 	nodev,				/* read */
185 	nodev,				/* write */
186 	cnex_ioctl,			/* ioctl */
187 	nodev,				/* devmap */
188 	nodev,				/* mmap */
189 	nodev,				/* segmap */
190 	nochpoll,			/* poll */
191 	ddi_prop_op,			/* cb_prop_op */
192 	0,				/* streamtab  */
193 	D_MP | D_NEW | D_HOTPLUG	/* Driver compatibility flag */
194 };
195 
196 static struct dev_ops cnex_ops = {
197 	DEVO_REV,		/* devo_rev, */
198 	0,			/* refcnt  */
199 	ddi_getinfo_1to1,	/* info */
200 	nulldev,		/* identify */
201 	nulldev,		/* probe */
202 	cnex_attach,		/* attach */
203 	cnex_detach,		/* detach */
204 	nodev,			/* reset */
205 	&cnex_cb_ops,		/* driver operations */
206 	&cnex_bus_ops,		/* bus operations */
207 	nulldev			/* power */
208 };
209 
210 /*
211  * Module linkage information for the kernel.
212  */
213 static struct modldrv modldrv = {
214 	&mod_driverops,
215 	"sun4v channel-devices nexus driver v%I%",
216 	&cnex_ops,
217 };
218 
219 static struct modlinkage modlinkage = {
220 	MODREV_1, (void *)&modldrv, NULL
221 };
222 
223 int
224 _init(void)
225 {
226 	int err;
227 
228 	if ((err = ddi_soft_state_init(&cnex_state,
229 		sizeof (cnex_soft_state_t), 0)) != 0) {
230 		return (err);
231 	}
232 	if ((err = mod_install(&modlinkage)) != 0) {
233 		ddi_soft_state_fini(&cnex_state);
234 		return (err);
235 	}
236 	return (0);
237 }
238 
239 int
240 _fini(void)
241 {
242 	int err;
243 
244 	if ((err = mod_remove(&modlinkage)) != 0)
245 		return (err);
246 	ddi_soft_state_fini(&cnex_state);
247 	return (0);
248 }
249 
250 int
251 _info(struct modinfo *modinfop)
252 {
253 	return (mod_info(&modlinkage, modinfop));
254 }
255 
256 /*
257  * Callback function invoked by the interrupt redistribution
258  * framework. It retargets each registered channel's Tx and Rx
259  * interrupts to a CPU that is currently available in the system.
260  */
261 static void
262 cnex_intr_redist(void *arg)
263 {
264 	cnex_ldc_t		*cldcp;
265 	cnex_soft_state_t	*cnex_ssp = arg;
266 	int			intr_state;
267 	hrtime_t 		start;
268 	uint64_t		cpuid;
269 	int 			rv;
270 
271 	ASSERT(cnex_ssp != NULL);
272 	mutex_enter(&cnex_ssp->clist_lock);
273 
274 	cldcp = cnex_ssp->clist;
275 	while (cldcp != NULL) {
276 
277 		mutex_enter(&cldcp->lock);
278 
279 		if (cldcp->tx.hdlr) {
280 			/*
281 			 * Don't do anything for disabled interrupts.
282 			 */
283 			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
284 			    cldcp->tx.ino, &intr_state);
285 			if (rv) {
286 				DWARN("cnex_intr_redist: tx ino=0x%llx, "
287 				    "can't get valid\n", cldcp->tx.ino);
288 				mutex_exit(&cldcp->lock);
289 				mutex_exit(&cnex_ssp->clist_lock);
290 				return;
291 			}
292 			if (intr_state == HV_INTR_NOTVALID) {
293 				mutex_exit(&cldcp->lock);
294 				cldcp = cldcp->next;
295 				continue;
296 			}
297 
298 			cpuid = intr_dist_cpuid();
299 
300 			/* disable interrupts */
301 			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
302 			    cldcp->tx.ino, HV_INTR_NOTVALID);
303 			if (rv) {
304 				DWARN("cnex_intr_redist: tx ino=0x%llx, "
305 				    "can't set valid\n", cldcp->tx.ino);
306 				mutex_exit(&cldcp->lock);
307 				mutex_exit(&cnex_ssp->clist_lock);
308 				return;
309 			}
310 
311 			/*
312 			 * Make a best effort to wait for pending interrupts
313 			 * to finish. There is not much we can do if we timeout.
314 			 */
315 			start = gethrtime();
316 
317 			do {
318 				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
319 				    cldcp->tx.ino, &intr_state);
320 				if (rv) {
321 					DWARN("cnex_intr_redist: tx ino=0x%llx,"
322 					    "can't get state\n", cldcp->tx.ino);
323 					mutex_exit(&cldcp->lock);
324 					mutex_exit(&cnex_ssp->clist_lock);
325 					return;
326 				}
327 
328 				if ((gethrtime() - start) > cnex_pending_tmout)
329 					break;
330 
331 			} while (!panicstr &&
332 			    intr_state == HV_INTR_DELIVERED_STATE);
333 
334 			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
335 			    cldcp->tx.ino, cpuid);
336 			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
337 			    cldcp->tx.ino, HV_INTR_VALID);
338 		}
339 
340 		if (cldcp->rx.hdlr) {
341 			/*
342 			 * Don't do anything for disabled interrupts.
343 			 */
344 			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
345 			    cldcp->rx.ino, &intr_state);
346 			if (rv) {
347 				DWARN("cnex_intr_redist: rx ino=0x%llx, "
348 				    "can't get valid\n", cldcp->rx.ino);
349 				mutex_exit(&cldcp->lock);
350 				mutex_exit(&cnex_ssp->clist_lock);
351 				return;
352 			}
353 			if (intr_state == HV_INTR_NOTVALID) {
354 				mutex_exit(&cldcp->lock);
355 				cldcp = cldcp->next;
356 				continue;
357 			}
358 
359 			cpuid = intr_dist_cpuid();
360 
361 			/* disable interrupts */
362 			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
363 			    cldcp->rx.ino, HV_INTR_NOTVALID);
364 			if (rv) {
365 				DWARN("cnex_intr_redist: rx ino=0x%llx, "
366 				    "can't set valid\n", cldcp->rx.ino);
367 				mutex_exit(&cldcp->lock);
368 				mutex_exit(&cnex_ssp->clist_lock);
369 				return;
370 			}
371 
372 			/*
373 			 * Make a best effort to wait for pending interrupts
374 			 * to finish. There is not much we can do if we timeout.
375 			 */
376 			start = gethrtime();
377 
378 			do {
379 				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
380 				    cldcp->rx.ino, &intr_state);
381 				if (rv) {
382 					DWARN("cnex_intr_redist: rx ino=0x%llx,"
383 				    "can't get state\n", cldcp->rx.ino);
384 					mutex_exit(&cldcp->lock);
385 					mutex_exit(&cnex_ssp->clist_lock);
386 					return;
387 				}
388 
389 				if ((gethrtime() - start) > cnex_pending_tmout)
390 					break;
391 
392 			} while (!panicstr &&
393 			    intr_state == HV_INTR_DELIVERED_STATE);
394 
395 			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
396 			    cldcp->rx.ino, cpuid);
397 			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
398 			    cldcp->rx.ino, HV_INTR_VALID);
399 		}
400 
401 		mutex_exit(&cldcp->lock);
402 
403 		/* next channel */
404 		cldcp = cldcp->next;
405 	}
406 
407 	mutex_exit(&cnex_ssp->clist_lock);
408 }
409 
410 /*
411  * Exported interface to register an LDC endpoint with
412  * the channel nexus
413  */
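/*
 * cnex_reg_chan() looks up the channel's "tx-ino" and "rx-ino" properties
 * in the machine description (MD) and links a new cnex_ldc_t onto the
 * nexus channel list; interrupt handlers are added separately via
 * cnex_add_intr().
 */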
414 static int
415 cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
416 {
417 	int		idx;
418 	cnex_ldc_t	*cldcp;
419 	int		listsz, num_nodes, num_channels;
420 	md_t		*mdp = NULL;
421 	mde_cookie_t	rootnode, *listp = NULL;
422 	uint64_t	tmp_id, rxino, txino;
423 	cnex_soft_state_t *cnex_ssp;
424 	int		status, instance;
425 
426 	/* Get device instance and structure */
427 	instance = ddi_get_instance(dip);
428 	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
429 
430 	/* Check to see if channel is already registered */
431 	mutex_enter(&cnex_ssp->clist_lock);
432 	cldcp = cnex_ssp->clist;
433 	while (cldcp) {
434 		if (cldcp->id == id) {
435 			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
436 			mutex_exit(&cnex_ssp->clist_lock);
437 			return (EINVAL);
438 		}
439 		cldcp = cldcp->next;
440 	}
441 
442 	/* Get the Tx/Rx inos from the MD */
443 	if ((mdp = md_get_handle()) == NULL) {
444 		DWARN("cnex_reg_chan: cannot init MD\n");
445 		mutex_exit(&cnex_ssp->clist_lock);
446 		return (ENXIO);
447 	}
448 	num_nodes = md_node_count(mdp);
449 	ASSERT(num_nodes > 0);
450 
451 	listsz = num_nodes * sizeof (mde_cookie_t);
452 	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);
453 
454 	rootnode = md_root_node(mdp);
455 
456 	/* search for all channel-endpoint nodes */
457 	num_channels = md_scan_dag(mdp, rootnode,
458 	    md_find_name(mdp, "channel-endpoint"),
459 	    md_find_name(mdp, "fwd"), listp);
460 	if (num_channels <= 0) {
461 		DWARN("cnex_reg_chan: no channel-endpoint nodes found\n");
462 		kmem_free(listp, listsz);
463 		(void) md_fini_handle(mdp);
464 		mutex_exit(&cnex_ssp->clist_lock);
465 		return (EINVAL);
466 	}
467 
468 	for (idx = 0; idx < num_channels; idx++) {
469 
470 		/* Get the channel ID */
471 		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
472 		if (status) {
473 			DWARN("cnex_reg_chan: cannot read LDC ID\n");
474 			kmem_free(listp, listsz);
475 			(void) md_fini_handle(mdp);
476 			mutex_exit(&cnex_ssp->clist_lock);
477 			return (ENXIO);
478 		}
479 		if (tmp_id != id)
480 			continue;
481 
482 		/* Get the Tx and Rx ino */
483 		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
484 		if (status) {
485 			DWARN("cnex_reg_chan: cannot read Tx ino\n");
486 			kmem_free(listp, listsz);
487 			(void) md_fini_handle(mdp);
488 			mutex_exit(&cnex_ssp->clist_lock);
489 			return (ENXIO);
490 		}
491 		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
492 		if (status) {
493 			DWARN("cnex_reg_chan: cannot read Rx ino\n");
494 			kmem_free(listp, listsz);
495 			(void) md_fini_handle(mdp);
496 			mutex_exit(&cnex_ssp->clist_lock);
497 			return (ENXIO);
498 		}

		/* found the matching endpoint; stop searching */
		break;
499 	}
500 	kmem_free(listp, listsz);
501 	(void) md_fini_handle(mdp);

	/* fail if the requested channel was not found in the MD */
	if (idx == num_channels) {
		DWARN("cnex_reg_chan: channel 0x%llx not found in MD\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
502 
503 	/* Allocate a new channel structure */
504 	cldcp = kmem_zalloc(sizeof (*cldcp), KM_SLEEP);
505 
506 	/* Initialize the channel */
507 	mutex_init(&cldcp->lock, NULL, MUTEX_DRIVER, NULL);
508 
509 	cldcp->id = id;
510 	cldcp->tx.ino = txino;
511 	cldcp->rx.ino = rxino;
512 	cldcp->devclass = devclass;
513 
514 	/* add channel to nexus channel list */
515 	cldcp->next = cnex_ssp->clist;
516 	cnex_ssp->clist = cldcp;
517 
518 	mutex_exit(&cnex_ssp->clist_lock);
519 
520 	return (0);
521 }
522 
523 /*
524  * Add Tx/Rx interrupt handler for the channel
525  */
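/*
 * The handler is wired up in several steps: pick a PIL from the devclass
 * map, register cnex_intr_wrapper() in the ivec table, program the cookie
 * into the HV, target the interrupt at a CPU chosen by intr_dist_cpuid(),
 * and finally mark the interrupt idle and valid.
 */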
526 static int
527 cnex_add_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype,
528     uint_t (*hdlr)(), caddr_t arg1, caddr_t arg2)
529 {
530 	int		rv, idx, pil;
531 	cnex_ldc_t	*cldcp;
532 	cnex_intr_t	*iinfo;
533 	uint64_t	cpuid;
534 	cnex_soft_state_t *cnex_ssp;
535 	int		instance;
536 
537 	/* Get device instance and structure */
538 	instance = ddi_get_instance(dip);
539 	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
540 
541 	/* get channel info */
542 	mutex_enter(&cnex_ssp->clist_lock);
543 	cldcp = cnex_ssp->clist;
544 	while (cldcp) {
545 		if (cldcp->id == id)
546 			break;
547 		cldcp = cldcp->next;
548 	}
549 	if (cldcp == NULL) {
550 		DWARN("cnex_add_intr: channel 0x%llx does not exist\n", id);
551 		mutex_exit(&cnex_ssp->clist_lock);
552 		return (EINVAL);
553 	}
554 	mutex_exit(&cnex_ssp->clist_lock);
555 
556 	/* get channel lock */
557 	mutex_enter(&cldcp->lock);
558 
559 	/* get interrupt type */
560 	if (itype == CNEX_TX_INTR) {
561 		iinfo = &(cldcp->tx);
562 	} else if (itype == CNEX_RX_INTR) {
563 		iinfo = &(cldcp->rx);
564 	} else {
565 		DWARN("cnex_add_intr: invalid interrupt type\n");
566 		mutex_exit(&cldcp->lock);
567 		return (EINVAL);
568 	}
569 
570 	/* check if a handler is already added */
571 	if (iinfo->hdlr != 0) {
572 		DWARN("cnex_add_intr: interrupt handler exists\n");
573 		mutex_exit(&cldcp->lock);
574 		return (EINVAL);
575 	}
576 
577 	/* save interrupt handler info */
578 	iinfo->hdlr = hdlr;
579 	iinfo->arg1 = arg1;
580 	iinfo->arg2 = arg2;
581 
582 	iinfo->ssp = cnex_ssp;
583 
584 	/*
585 	 * FIXME - generate the interrupt cookie
586 	 * using the interrupt registry
587 	 */
588 	iinfo->icookie = cnex_ssp->cfghdl | iinfo->ino;
589 
590 	D1("cnex_add_intr: add hdlr, cfghdl=0x%llx, ino=0x%llx, "
591 	    "cookie=0x%llx\n", cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);
592 
593 	/* Pick a PIL on the basis of the channel's devclass (PIL_3 default) */
594 	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
595 		if (cldcp->devclass == cnex_class_to_pil[idx].devclass) {
596 			pil = cnex_class_to_pil[idx].pil;
597 			break;
598 		}
599 	}
600 
601 	/* add interrupt to solaris ivec table */
602 	VERIFY(add_ivintr(iinfo->icookie, pil, cnex_intr_wrapper,
603 		(caddr_t)iinfo, NULL) == 0);
604 
605 	/* set the cookie in the HV */
606 	rv = hvldc_intr_setcookie(cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set cookie\n",
		    iinfo->ino);
		goto hv_error;
	}

608 	/* pick next CPU in the domain for this channel */
609 	cpuid = intr_dist_cpuid();
610 
611 	/* set the target CPU and then enable interrupts */
612 	rv = hvldc_intr_settarget(cnex_ssp->cfghdl, iinfo->ino, cpuid);
613 	if (rv) {
614 		DWARN("cnex_add_intr: ino=0x%llx, cannot set target cpu\n",
615 		    iinfo->ino);
616 		goto hv_error;
617 	}
618 	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
619 	    HV_INTR_IDLE_STATE);
620 	if (rv) {
621 		DWARN("cnex_add_intr: ino=0x%llx, cannot set state\n",
622 		    iinfo->ino);
623 		goto hv_error;
624 	}
625 	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_VALID);
626 	if (rv) {
627 		DWARN("cnex_add_intr: ino=0x%llx, cannot set valid\n",
628 		    iinfo->ino);
629 		goto hv_error;
630 	}
631 
632 	mutex_exit(&cldcp->lock);
633 	return (0);
634 
635 hv_error:
636 	(void) rem_ivintr(iinfo->icookie, NULL);
637 	mutex_exit(&cldcp->lock);
638 	return (ENXIO);
639 }
640 
641 
642 /*
643  * Exported interface to unregister an LDC endpoint with
644  * the channel nexus
645  */
646 static int
647 cnex_unreg_chan(dev_info_t *dip, uint64_t id)
648 {
649 	cnex_ldc_t	*cldcp, *prev_cldcp;
650 	cnex_soft_state_t *cnex_ssp;
651 	int		instance;
652 
653 	/* Get device instance and structure */
654 	instance = ddi_get_instance(dip);
655 	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
656 
657 	/* find and remove channel from list */
658 	mutex_enter(&cnex_ssp->clist_lock);
659 	prev_cldcp = NULL;
660 	cldcp = cnex_ssp->clist;
661 	while (cldcp) {
662 		if (cldcp->id == id)
663 			break;
664 		prev_cldcp = cldcp;
665 		cldcp = cldcp->next;
666 	}
667 
668 	if (cldcp == NULL) {
669 		DWARN("cnex_unreg_chan: invalid channel 0x%llx\n", id);
670 		mutex_exit(&cnex_ssp->clist_lock);
671 		return (EINVAL);
672 	}
673 
674 	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
675 		DWARN("cnex_unreg_chan: handlers still exist\n");
676 		mutex_exit(&cnex_ssp->clist_lock);
677 		return (ENXIO);
678 	}
679 
680 	if (prev_cldcp)
681 		prev_cldcp->next = cldcp->next;
682 	else
683 		cnex_ssp->clist = cldcp->next;
684 
685 	mutex_exit(&cnex_ssp->clist_lock);
686 
687 	/* destroy mutex */
688 	mutex_destroy(&cldcp->lock);
689 
690 	/* free channel */
691 	kmem_free(cldcp, sizeof (*cldcp));
692 
693 	return (0);
694 }
695 
696 /*
697  * Remove Tx/Rx interrupt handler for the channel
698  */
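/*
 * Removal first marks the interrupt not-valid in the HV, then waits
 * (bounded by cnex_pending_tmout) for a delivered interrupt to drain
 * before tearing down the ivec entry; a busy interrupt returns EAGAIN.
 */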
699 static int
700 cnex_rem_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
701 {
702 	int			rv;
703 	cnex_ldc_t		*cldcp;
704 	cnex_intr_t		*iinfo;
705 	cnex_soft_state_t	*cnex_ssp;
706 	hrtime_t 		start;
707 	int			instance, istate;
708 
709 	/* Get device instance and structure */
710 	instance = ddi_get_instance(dip);
711 	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
712 
713 	/* get channel info */
714 	mutex_enter(&cnex_ssp->clist_lock);
715 	cldcp = cnex_ssp->clist;
716 	while (cldcp) {
717 		if (cldcp->id == id)
718 			break;
719 		cldcp = cldcp->next;
720 	}
721 	if (cldcp == NULL) {
722 		DWARN("cnex_rem_intr: channel 0x%llx does not exist\n", id);
723 		mutex_exit(&cnex_ssp->clist_lock);
724 		return (EINVAL);
725 	}
726 	mutex_exit(&cnex_ssp->clist_lock);
727 
728 	/* get rid of the channel intr handler */
729 	mutex_enter(&cldcp->lock);
730 
731 	/* get interrupt type */
732 	if (itype == CNEX_TX_INTR) {
733 		iinfo = &(cldcp->tx);
734 	} else if (itype == CNEX_RX_INTR) {
735 		iinfo = &(cldcp->rx);
736 	} else {
737 		DWARN("cnex_rem_intr: invalid interrupt type\n");
738 		mutex_exit(&cldcp->lock);
739 		return (EINVAL);
740 	}
741 
742 	D1("cnex_rem_intr: interrupt ino=0x%x\n", iinfo->ino);
743 
744 	/* make sure an interrupt handler was added earlier */
745 	if (iinfo->hdlr == 0) {
746 		DWARN("cnex_rem_intr: interrupt handler does not exist\n");
747 		mutex_exit(&cldcp->lock);
748 		return (EINVAL);
749 	}
750 
751 	D1("cnex_rem_intr: set intr to invalid ino=0x%x\n", iinfo->ino);
752 	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
753 	    iinfo->ino, HV_INTR_NOTVALID);
754 	if (rv) {
755 		DWARN("cnex_rem_intr: cannot set valid ino=%x\n", iinfo->ino);
756 		mutex_exit(&cldcp->lock);
757 		return (ENXIO);
758 	}
759 
760 	/*
761 	 * Make a best effort to wait for pending interrupts
762 	 * to finish. There is not much we can do if we timeout.
763 	 */
764 	start = gethrtime();
765 	do {
766 		rv = hvldc_intr_getstate(cnex_ssp->cfghdl, iinfo->ino, &istate);
767 		if (rv) {
768 			DWARN("cnex_rem_intr: ino=0x%llx, cannot get state\n",
769 			    iinfo->ino);
770 			mutex_exit(&cldcp->lock);
771 			return (ENXIO);
772 		}
773 
774 		if ((gethrtime() - start) > cnex_pending_tmout)
775 			break;
776 
777 	} while (!panicstr && istate == HV_INTR_DELIVERED_STATE);
778 
779 	/* if interrupts are still pending print warning */
780 	if (istate != HV_INTR_IDLE_STATE) {
781 		DWARN("cnex_rem_intr: cannot remove intr busy ino=%x\n",
782 		    iinfo->ino);
783 		mutex_exit(&cldcp->lock);
784 		return (EAGAIN);
785 	}
786 
787 	/* remove interrupt */
788 	(void) rem_ivintr(iinfo->icookie, NULL);
789 
790 	/* clear interrupt info */
791 	bzero(iinfo, sizeof (*iinfo));
792 
793 	mutex_exit(&cldcp->lock);
794 
795 	return (0);
796 }
797 
798 
799 /*
800  * Clear pending Tx/Rx interrupt
801  */
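/*
 * Clearing simply moves the interrupt back to the HV idle state so that
 * the next interrupt for the channel can be delivered.
 */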
802 static int
803 cnex_clr_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
804 {
805 	int			rv;
806 	cnex_ldc_t		*cldcp;
807 	cnex_intr_t		*iinfo;
808 	cnex_soft_state_t	*cnex_ssp;
809 	int			instance;
810 
811 	/* Get device instance and structure */
812 	instance = ddi_get_instance(dip);
813 	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
814 
815 	/* get channel info */
816 	mutex_enter(&cnex_ssp->clist_lock);
817 	cldcp = cnex_ssp->clist;
818 	while (cldcp) {
819 		if (cldcp->id == id)
820 			break;
821 		cldcp = cldcp->next;
822 	}
823 	if (cldcp == NULL) {
824 		DWARN("cnex_clr_intr: channel 0x%llx does not exist\n", id);
825 		mutex_exit(&cnex_ssp->clist_lock);
826 		return (EINVAL);
827 	}
828 	mutex_exit(&cnex_ssp->clist_lock);
829 
830 	mutex_enter(&cldcp->lock);
831 
832 	/* get interrupt type */
833 	if (itype == CNEX_TX_INTR) {
834 		iinfo = &(cldcp->tx);
835 	} else if (itype == CNEX_RX_INTR) {
836 		iinfo = &(cldcp->rx);
837 	} else {
838 		DWARN("cnex_clr_intr: invalid interrupt type\n");
839 		mutex_exit(&cldcp->lock);
840 		return (EINVAL);
841 	}
842 
843 	D1("cnex_clr_intr: interrupt ino=0x%x\n", iinfo->ino);
844 
845 	/* make sure an interrupt handler was added earlier */
846 	if (iinfo->hdlr == 0) {
847 		DWARN("cnex_clr_intr: interrupt handler does not exist\n");
848 		mutex_exit(&cldcp->lock);
849 		return (EINVAL);
850 	}
851 
852 	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
853 	    HV_INTR_IDLE_STATE);
854 	if (rv) {
855 		DWARN("cnex_clr_intr: cannot clear interrupt state\n");
856 		mutex_exit(&cldcp->lock);
857 		return (ENXIO);
858 	}
859 
860 	mutex_exit(&cldcp->lock);
861 
862 	return (0);
863 }
864 
865 /*
866  * Channel nexus interrupt handler wrapper
867  */
868 static uint_t
869 cnex_intr_wrapper(caddr_t arg)
870 {
871 	int 			res;
872 	uint_t 			(*handler)();
873 	caddr_t 		handler_arg1;
874 	caddr_t 		handler_arg2;
875 	cnex_intr_t 		*iinfo = (cnex_intr_t *)arg;
876 
877 	ASSERT(iinfo != NULL);
878 
879 	handler = iinfo->hdlr;
880 	handler_arg1 = iinfo->arg1;
881 	handler_arg2 = iinfo->arg2;
882 
883 	D1("cnex_intr_wrapper: ino=0x%llx invoke client handler\n", iinfo->ino);
884 	res = (*handler)(handler_arg1, handler_arg2);
885 
886 	return (res);
887 }
888 
889 /*ARGSUSED*/
890 static int
891 cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
892 {
893 	int 		rv, instance, reglen;
894 	cnex_regspec_t	*reg_p;
895 	ldc_cnex_t	cinfo;
896 	cnex_soft_state_t *cnex_ssp;
897 
898 	switch (cmd) {
899 	case DDI_ATTACH:
900 		break;
901 	case DDI_RESUME:
902 		return (DDI_SUCCESS);
903 	default:
904 		return (DDI_FAILURE);
905 	}
906 
907 	/*
908 	 * Get the instance specific soft state structure.
909 	 * Save the devi for this instance in the soft_state data.
910 	 */
911 	instance = ddi_get_instance(devi);
912 	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
913 		return (DDI_FAILURE);
914 	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
915 
916 	cnex_ssp->devi = devi;
917 	cnex_ssp->clist = NULL;
918 
919 	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
920 	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		/* free the soft state allocated above before failing */
		ddi_soft_state_free(cnex_state, instance);
921 		return (DDI_FAILURE);
922 	}
923 
924 	/* get the sun4v config handle for this device */
925 	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
926 	kmem_free(reg_p, reglen);
927 
928 	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);
929 
930 	/* init channel list mutex */
931 	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);
932 
933 	/* Register with LDC module */
934 	cinfo.dip = devi;
935 	cinfo.reg_chan = cnex_reg_chan;
936 	cinfo.unreg_chan = cnex_unreg_chan;
937 	cinfo.add_intr = cnex_add_intr;
938 	cinfo.rem_intr = cnex_rem_intr;
939 	cinfo.clr_intr = cnex_clr_intr;
940 
941 	/*
942 	 * ldc_register() will fail if a cnex instance has already
943 	 * registered with the LDC framework.
944 	 */
945 	rv = ldc_register(&cinfo);
946 	if (rv) {
947 		DWARN("cnex_attach: unable to register with LDC\n");
948 		mutex_destroy(&cnex_ssp->clist_lock);
949 		ddi_soft_state_free(cnex_state, instance);
950 		return (DDI_FAILURE);
951 	}
952 
953 	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
954 	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
955 		ddi_remove_minor_node(devi, NULL);
		/* undo the LDC registration done above */
		(void) ldc_unregister(&cinfo);
956 		mutex_destroy(&cnex_ssp->clist_lock);
957 		ddi_soft_state_free(cnex_state, instance);
958 		return (DDI_FAILURE);
959 	}
960 
961 	/* Add interrupt redistribution callback. */
962 	intr_dist_add(cnex_intr_redist, cnex_ssp);
963 
964 	ddi_report_dev(devi);
965 	return (DDI_SUCCESS);
966 }
967 
968 /*ARGSUSED*/
969 static int
970 cnex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
971 {
972 	int 		instance;
973 	ldc_cnex_t	cinfo;
974 	cnex_soft_state_t *cnex_ssp;
975 
976 	switch (cmd) {
977 	case DDI_DETACH:
978 		break;
979 	case DDI_SUSPEND:
980 		return (DDI_SUCCESS);
981 	default:
982 		return (DDI_FAILURE);
983 	}
984 
985 	instance = ddi_get_instance(devi);
986 	cnex_ssp = ddi_get_soft_state(cnex_state, instance);
987 
988 	/* check if there are any channels still registered */
989 	if (cnex_ssp->clist) {
990 		cmn_err(CE_WARN, "?cnex_detach: instance %d still has "
991 		    "registered channels\n", ddi_get_instance(devi));
992 		return (DDI_FAILURE);
993 	}
994 
995 	/* Unregister with LDC module */
996 	cinfo.dip = devi;
997 	(void) ldc_unregister(&cinfo);
998 
999 	/* Remove interrupt redistribution callback. */
1000 	intr_dist_rem(cnex_intr_redist, cnex_ssp);
1001 
1002 	/* destroy mutex */
1003 	mutex_destroy(&cnex_ssp->clist_lock);
1004 
1005 	/* free soft state structure */
1006 	ddi_soft_state_free(cnex_state, instance);
1007 
1008 	return (DDI_SUCCESS);
1009 }
1010 
1011 /*ARGSUSED*/
1012 static int
1013 cnex_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1014 {
1015 	int instance;
1016 
1017 	if (otyp != OTYP_CHR)
1018 		return (EINVAL);
1019 
1020 	instance = getminor(*devp);
1021 	if (ddi_get_soft_state(cnex_state, instance) == NULL)
1022 		return (ENXIO);
1023 
1024 	return (0);
1025 }
1026 
1027 /*ARGSUSED*/
1028 static int
1029 cnex_close(dev_t dev, int flags, int otyp, cred_t *credp)
1030 {
1031 	int instance;
1032 
1033 	if (otyp != OTYP_CHR)
1034 		return (EINVAL);
1035 
1036 	instance = getminor(dev);
1037 	if (ddi_get_soft_state(cnex_state, instance) == NULL)
1038 		return (ENXIO);
1039 
1040 	return (0);
1041 }
1042 
1043 /*ARGSUSED*/
1044 static int
1045 cnex_ioctl(dev_t dev,
1046     int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
1047 {
1048 	int instance;
1049 	cnex_soft_state_t *cnex_ssp;
1050 
1051 	instance = getminor(dev);
1052 	if ((cnex_ssp = ddi_get_soft_state(cnex_state, instance)) == NULL)
1053 		return (ENXIO);
1054 	ASSERT(cnex_ssp->devi);
1055 	return (ndi_devctl_ioctl(cnex_ssp->devi, cmd, arg, mode, 0));
1056 }
1057 
1058 static int
1059 cnex_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
1060     void *arg, void *result)
1061 {
1062 	char		name[MAXNAMELEN];
1063 	uint32_t	reglen;
1064 	int		*cnex_regspec;
1065 
1066 	switch (ctlop) {
1067 	case DDI_CTLOPS_REPORTDEV:
1068 		if (rdip == NULL)
1069 			return (DDI_FAILURE);
1070 		cmn_err(CE_CONT, "?channel-device: %s%d\n",
1071 		    ddi_driver_name(rdip), ddi_get_instance(rdip));
1072 		return (DDI_SUCCESS);
1073 
1074 	case DDI_CTLOPS_INITCHILD:
1075 	{
1076 		dev_info_t *child = (dev_info_t *)arg;
1077 
1078 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
1079 			DDI_PROP_DONTPASS, "reg",
1080 			&cnex_regspec, &reglen) != DDI_SUCCESS) {
1081 			return (DDI_FAILURE);
1082 		}
1083 
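		/* the first "reg" entry becomes the child's unit address */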
1084 		(void) snprintf(name, sizeof (name), "%x", *cnex_regspec);
1085 		ddi_set_name_addr(child, name);
1086 		ddi_set_parent_data(child, NULL);
1087 		ddi_prop_free(cnex_regspec);
1088 		return (DDI_SUCCESS);
1089 	}
1090 
1091 	case DDI_CTLOPS_UNINITCHILD:
1092 	{
1093 		dev_info_t *child = (dev_info_t *)arg;
1094 
1095 		NDI_CONFIG_DEBUG((CE_NOTE,
1096 		    "DDI_CTLOPS_UNINITCHILD(%s, instance=%d)",
1097 		    ddi_driver_name(child), DEVI(child)->devi_instance));
1098 
1099 		ddi_set_name_addr(child, NULL);
1100 
1101 		return (DDI_SUCCESS);
1102 	}
1103 
1104 	case DDI_CTLOPS_DMAPMAPC:
1105 	case DDI_CTLOPS_REPORTINT:
1106 	case DDI_CTLOPS_REGSIZE:
1107 	case DDI_CTLOPS_NREGS:
1108 	case DDI_CTLOPS_SIDDEV:
1109 	case DDI_CTLOPS_SLAVEONLY:
1110 	case DDI_CTLOPS_AFFINITY:
1111 	case DDI_CTLOPS_POKE:
1112 	case DDI_CTLOPS_PEEK:
1113 		/*
1114 		 * These ops correspond to functions that "shouldn't" be called
1115 		 * by a channel-device driver.  So we whine when we're called.
1116 		 */
1117 		cmn_err(CE_WARN, "%s%d: invalid op (%d) from %s%d\n",
1118 		    ddi_driver_name(dip), ddi_get_instance(dip), ctlop,
1119 		    ddi_driver_name(rdip), ddi_get_instance(rdip));
1120 		return (DDI_FAILURE);
1121 
1122 	case DDI_CTLOPS_ATTACH:
1123 	case DDI_CTLOPS_BTOP:
1124 	case DDI_CTLOPS_BTOPR:
1125 	case DDI_CTLOPS_DETACH:
1126 	case DDI_CTLOPS_DVMAPAGESIZE:
1127 	case DDI_CTLOPS_IOMIN:
1128 	case DDI_CTLOPS_POWER:
1129 	case DDI_CTLOPS_PTOB:
1130 	default:
1131 		/*
1132 		 * Everything else (e.g. PTOB/BTOP/BTOPR requests) we pass up
1133 		 */
1134 		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
1135 	}
1136 }
1137 
1138 /* -------------------------------------------------------------------------- */
1139