xref: /illumos-gate/usr/src/uts/common/xen/os/xvdi.c (revision 2952f70a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2014 by Delphix. All rights reserved.
29  * Copyright 2018 Nexenta Systems, Inc.
30  */
31 
32 /*
33  * Xen virtual device driver interfaces
34  */
35 
36 /*
37  * todo:
38  * + name space clean up:
39  *	xvdi_* - public xen interfaces, for use by all leaf drivers
40  *	xd_* - public xen data structures
41  *	i_xvdi_* - implementation private functions
42  *	xendev_* - xendev driver interfaces, both internal and in cb_ops/bus_ops
43  * + add mdb dcmds to dump ring status
44  * + implement xvdi_xxx to wrap xenbus_xxx read/write function
45  * + convert (xendev_ring_t *) into xvdi_ring_handle_t
46  */
47 #include <sys/conf.h>
48 #include <sys/param.h>
49 #include <sys/kmem.h>
50 #include <vm/seg_kmem.h>
51 #include <sys/debug.h>
52 #include <sys/modctl.h>
53 #include <sys/autoconf.h>
54 #include <sys/ddi_impldefs.h>
55 #include <sys/ddi_subrdefs.h>
56 #include <sys/ddi.h>
57 #include <sys/sunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/sunldi.h>
60 #include <sys/fs/dv_node.h>
61 #include <sys/avintr.h>
62 #include <sys/psm.h>
63 #include <sys/spl.h>
64 #include <sys/promif.h>
65 #include <sys/list.h>
66 #include <sys/bootconf.h>
67 #include <sys/bootsvcs.h>
68 #include <sys/bootinfo.h>
69 #include <sys/note.h>
70 #include <sys/sysmacros.h>
71 #ifdef XPV_HVM_DRIVER
72 #include <sys/xpv_support.h>
73 #include <sys/hypervisor.h>
74 #include <public/grant_table.h>
75 #include <public/xen.h>
76 #include <public/io/xenbus.h>
77 #include <public/io/xs_wire.h>
78 #include <public/event_channel.h>
79 #include <public/io/xenbus.h>
80 #else /* XPV_HVM_DRIVER */
81 #include <sys/hypervisor.h>
82 #include <sys/xen_mmu.h>
83 #include <xen/sys/xenbus_impl.h>
84 #include <sys/evtchn_impl.h>
85 #endif /* XPV_HVM_DRIVER */
86 #include <sys/gnttab.h>
87 #include <xen/sys/xendev.h>
88 #include <vm/hat_i86.h>
89 #include <sys/scsi/generic/inquiry.h>
90 #include <util/sscanf.h>
91 #include <xen/public/io/xs_wire.h>
92 
93 
/*
 * Minimal ctype-style helpers (avoids a ctype dependency in the kernel).
 * NOTE: both macros evaluate 'ch' more than once; do not pass
 * expressions with side effects.
 */
#define	isdigit(ch)	((ch) >= '0' && (ch) <= '9')
#define	isxdigit(ch)	(isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
			((ch) >= 'A' && (ch) <= 'F'))
97 
98 static void xvdi_ring_init_sring(xendev_ring_t *);
99 static void xvdi_ring_init_front_ring(xendev_ring_t *, size_t, size_t);
100 #ifndef XPV_HVM_DRIVER
101 static void xvdi_ring_init_back_ring(xendev_ring_t *, size_t, size_t);
102 #endif
103 static void xvdi_reinit_ring(dev_info_t *, grant_ref_t *, xendev_ring_t *);
104 
105 static int i_xvdi_add_watches(dev_info_t *);
106 static void i_xvdi_rem_watches(dev_info_t *);
107 
108 static int i_xvdi_add_watch_oestate(dev_info_t *);
109 static void i_xvdi_rem_watch_oestate(dev_info_t *);
110 static void i_xvdi_oestate_cb(struct xenbus_device *, XenbusState);
111 static void i_xvdi_oestate_handler(void *);
112 
113 static int i_xvdi_add_watch_hpstate(dev_info_t *);
114 static void i_xvdi_rem_watch_hpstate(dev_info_t *);
115 static void i_xvdi_hpstate_cb(struct xenbus_watch *, const char **,
116     unsigned int);
117 static void i_xvdi_hpstate_handler(void *);
118 
119 static int i_xvdi_add_watch_bepath(dev_info_t *);
120 static void i_xvdi_rem_watch_bepath(dev_info_t *);
121 static void i_xvdi_bepath_cb(struct xenbus_watch *, const char **,
122     unsigned in);
123 
124 static void xendev_offline_device(void *);
125 
126 static void i_xvdi_probe_path_cb(struct xenbus_watch *, const char **,
127     unsigned int);
128 static void i_xvdi_probe_path_handler(void *);
129 
/* Event payload queued by i_xvdi_oestate_cb for i_xvdi_oestate_handler. */
typedef struct oestate_evt {
	dev_info_t *dip;	/* device the state change applies to */
	XenbusState state;	/* otherend state at the time of the event */
} i_oestate_evt_t;

/*
 * Static configuration describing one xendev device class:
 * xenstore paths, driver node names and interrupt priority.
 */
typedef struct xd_cfg {
	xendev_devclass_t devclass;	/* class this entry describes */
	char *xsdev;		/* xenstore device name; NULL == soft device */
	char *xs_path_fe;	/* xenstore frontend path */
	char *xs_path_be;	/* xenstore backend path */
	char *node_fe;		/* frontend driver node name */
	char *node_be;		/* backend driver node name */
	char *device_type;	/* "device_type" property value, if any */
	int xd_ipl;		/* interrupt priority level */
	int flags;		/* XD_DOM_* applicability flags */
} i_xd_cfg_t;
146 
#define	XD_DOM_ZERO	0x01	/* dom0 only. */
#define	XD_DOM_GUEST	0x02	/* Guest domains (i.e. non-dom0). */
#define	XD_DOM_IO	0x04	/* IO domains. */

#define	XD_DOM_ALL	(XD_DOM_ZERO | XD_DOM_GUEST)

/*
 * Per-class configuration table, looked up via i_xvdi_devclass2cfg().
 * Entries with a NULL xsdev field are "soft" devices with no xenstore
 * presence; they are created unconditionally rather than probed.
 */
static i_xd_cfg_t xdci[] = {
#ifndef XPV_HVM_DRIVER
	{ XEN_CONSOLE, NULL, NULL, NULL, "xencons", NULL,
	    "console", IPL_CONS, XD_DOM_ALL, },
#endif

	{ XEN_VNET, "vif", "device/vif", "backend/vif", "xnf", "xnb",
	    "network", IPL_VIF, XD_DOM_ALL, },

	{ XEN_VBLK, "vbd", "device/vbd", "backend/vbd", "xdf", "xdb",
	    "block", IPL_VBD, XD_DOM_ALL, },

	{ XEN_BLKTAP, "tap", NULL, "backend/tap", NULL, "xpvtap",
	    "block", IPL_VBD, XD_DOM_ALL, },

#ifndef XPV_HVM_DRIVER
	{ XEN_XENBUS, NULL, NULL, NULL, "xenbus", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_DOMCAPS, NULL, NULL, NULL, "domcaps", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_BALLOON, NULL, NULL, NULL, "balloon", NULL,
	    NULL, 0, XD_DOM_ALL, },
#endif

	{ XEN_EVTCHN, NULL, NULL, NULL, "evtchn", NULL,
	    NULL, 0, XD_DOM_ZERO, },

	{ XEN_PRIVCMD, NULL, NULL, NULL, "privcmd", NULL,
	    NULL, 0, XD_DOM_ZERO, },
};
/* Number of entries in xdci[]. */
#define	NXDC	(sizeof (xdci) / sizeof (xdci[0]))

/* xenstore-backed device enumeration helpers. */
static void i_xvdi_enum_fe(dev_info_t *, i_xd_cfg_t *);
static void i_xvdi_enum_be(dev_info_t *, i_xd_cfg_t *);
static void i_xvdi_enum_worker(dev_info_t *, i_xd_cfg_t *, char *);
190 
/*
 * Xen device channel device access and DMA attributes
 */
static ddi_device_acc_attr_t xendev_dc_accattr = {
	DDI_DEVICE_ATTR_V0, DDI_NEVERSWAP_ACC, DDI_STRICTORDER_ACC
};

/*
 * Ring buffers are a single page: page-aligned, one segment only
 * (xvdi_alloc_ring() asserts a single DMA cookie).
 */
static ddi_dma_attr_t xendev_dc_dmaattr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	MMU_PAGESIZE,		/* alignment in bytes */
	0x7ff,			/* bitmap of burst sizes */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};
212 
/* xendev nexus dip; NOTE(review): assigned outside this chunk — confirm. */
static dev_info_t *xendev_dip = NULL;

#define	XVDI_DBG_STATE	0x01	/* trace state transitions */
#define	XVDI_DBG_PROBE	0x02	/* trace device probing */

#ifdef DEBUG
/* Set to a mask of XVDI_DBG_* flags to enable debug output. */
int i_xvdi_debug = 0;

#define	XVDI_DPRINTF(flag, format, ...)			\
{							\
	if (i_xvdi_debug & (flag))			\
		prom_printf((format), __VA_ARGS__);	\
}
#else
#define	XVDI_DPRINTF(flag, format, ...)
#endif /* DEBUG */
229 
230 static i_xd_cfg_t *
231 i_xvdi_devclass2cfg(xendev_devclass_t devclass)
232 {
233 	i_xd_cfg_t *xdcp;
234 	int i;
235 
236 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++)
237 		if (xdcp->devclass == devclass)
238 			return (xdcp);
239 
240 	return (NULL);
241 }
242 
243 int
244 xvdi_init_dev(dev_info_t *dip)
245 {
246 	xendev_devclass_t devcls;
247 	int vdevnum;
248 	domid_t domid;
249 	struct xendev_ppd *pdp;
250 	i_xd_cfg_t *xdcp;
251 	boolean_t backend;
252 	char xsnamebuf[TYPICALMAXPATHLEN];
253 	char *xsname;
254 	void *prop_str;
255 	unsigned int prop_len;
256 	char unitaddr[16];
257 
258 	devcls = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
259 	    DDI_PROP_DONTPASS, "devclass", XEN_INVAL);
260 	vdevnum = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
261 	    DDI_PROP_DONTPASS, "vdev", VDEV_NOXS);
262 	domid = (domid_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
263 	    DDI_PROP_DONTPASS, "domain", DOMID_SELF);
264 
265 	backend = (domid != DOMID_SELF);
266 	xdcp = i_xvdi_devclass2cfg(devcls);
267 	if (xdcp->device_type != NULL)
268 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
269 		    "device_type", xdcp->device_type);
270 
271 	pdp = kmem_zalloc(sizeof (*pdp), KM_SLEEP);
272 	pdp->xd_domain = domid;
273 	pdp->xd_vdevnum = vdevnum;
274 	pdp->xd_devclass = devcls;
275 	pdp->xd_evtchn = INVALID_EVTCHN;
276 	list_create(&pdp->xd_xb_watches, sizeof (xd_xb_watches_t),
277 	    offsetof(xd_xb_watches_t, xxw_list));
278 	mutex_init(&pdp->xd_evt_lk, NULL, MUTEX_DRIVER, NULL);
279 	mutex_init(&pdp->xd_ndi_lk, NULL, MUTEX_DRIVER, NULL);
280 	ddi_set_parent_data(dip, pdp);
281 
282 	/*
283 	 * devices that do not need to interact with xenstore
284 	 */
285 	if (vdevnum == VDEV_NOXS) {
286 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
287 		    "unit-address", "0");
288 		if (devcls == XEN_CONSOLE)
289 			(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
290 			    "pm-hardware-state", "needs-suspend-resume");
291 		return (DDI_SUCCESS);
292 	}
293 
294 	/*
295 	 * PV devices that need to probe xenstore
296 	 */
297 
298 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
299 	    "pm-hardware-state", "needs-suspend-resume");
300 
301 	xsname = xsnamebuf;
302 	if (!backend)
303 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
304 		    "%s/%d", xdcp->xs_path_fe, vdevnum);
305 	else
306 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
307 		    "%s/%d/%d", xdcp->xs_path_be, domid, vdevnum);
308 	if ((xenbus_read_driver_state(xsname) >= XenbusStateClosing)) {
309 		/* Don't try to init a dev that may be closing */
310 		mutex_destroy(&pdp->xd_ndi_lk);
311 		mutex_destroy(&pdp->xd_evt_lk);
312 		kmem_free(pdp, sizeof (*pdp));
313 		ddi_set_parent_data(dip, NULL);
314 		return (DDI_FAILURE);
315 	}
316 
317 	pdp->xd_xsdev.nodename = i_ddi_strdup(xsname, KM_SLEEP);
318 	pdp->xd_xsdev.devicetype = xdcp->xsdev;
319 	pdp->xd_xsdev.frontend = (backend ? 0 : 1);
320 	pdp->xd_xsdev.data = dip;
321 	pdp->xd_xsdev.otherend_id = (backend ? domid : -1);
322 	if (i_xvdi_add_watches(dip) != DDI_SUCCESS) {
323 		cmn_err(CE_WARN, "xvdi_init_dev: "
324 		    "cannot add watches for %s", xsname);
325 		xvdi_uninit_dev(dip);
326 		return (DDI_FAILURE);
327 	}
328 
329 	if (backend)
330 		return (DDI_SUCCESS);
331 
332 	/*
333 	 * The unit-address for frontend devices is the name of the
334 	 * of the xenstore node containing the device configuration
335 	 * and is contained in the 'vdev' property.
336 	 * VIF devices are named using an incrementing integer.
337 	 * VBD devices are either named using the 32-bit dev_t value
338 	 * for linux 'hd' and 'xvd' devices, or a simple integer value
339 	 * in the range 0..767.  768 is the base value of the linux
340 	 * dev_t namespace, the dev_t value for 'hda'.
341 	 */
342 	(void) snprintf(unitaddr, sizeof (unitaddr), "%d", vdevnum);
343 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "unit-address",
344 	    unitaddr);
345 
346 	switch (devcls) {
347 	case XEN_VNET:
348 		if (xenbus_read(XBT_NULL, xsname, "mac", (void *)&prop_str,
349 		    &prop_len) != 0)
350 			break;
351 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "mac",
352 		    prop_str);
353 		kmem_free(prop_str, prop_len);
354 		break;
355 	case XEN_VBLK:
356 		/*
357 		 * cache a copy of the otherend name
358 		 * for ease of observeability
359 		 */
360 		if (xenbus_read(XBT_NULL, pdp->xd_xsdev.otherend, "dev",
361 		    &prop_str, &prop_len) != 0)
362 			break;
363 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
364 		    "dev-address", prop_str);
365 		kmem_free(prop_str, prop_len);
366 		break;
367 	default:
368 		break;
369 	}
370 
371 	return (DDI_SUCCESS);
372 }
373 
/*
 * Tear down everything xvdi_init_dev() set up: event callbacks,
 * xenstore watches, the otherend notification, and the parent-private
 * data itself.  Safe to call when initialization only partially
 * completed (pdp may be NULL).
 */
void
xvdi_uninit_dev(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	if (pdp != NULL) {
		/* Remove any registered callbacks. */
		xvdi_remove_event_handler(dip, NULL);

		/* Remove any registered watches. */
		i_xvdi_rem_watches(dip);

		/* tell other end to close */
		if (pdp->xd_xsdev.otherend_id != (domid_t)-1)
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);

		/* nodename was i_ddi_strdup()'ed in xvdi_init_dev() */
		if (pdp->xd_xsdev.nodename != NULL)
			kmem_free((char *)(pdp->xd_xsdev.nodename),
			    strlen(pdp->xd_xsdev.nodename) + 1);

		ddi_set_parent_data(dip, NULL);

		mutex_destroy(&pdp->xd_ndi_lk);
		mutex_destroy(&pdp->xd_evt_lk);
		kmem_free(pdp, sizeof (*pdp));
	}
}
402 
/*
 * Bind the event channel for this device instance.
 * Currently we only support one evtchn per device instance.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
xvdi_bind_evtchn(dev_info_t *dip, evtchn_port_t evtchn)
{
	struct xendev_ppd *pdp;
	domid_t oeid;
	int r;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);

	mutex_enter(&pdp->xd_evt_lk);
	if (pdp->xd_devclass == XEN_CONSOLE) {
		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
			/* domU console: evtchn comes from start-of-day info */
			pdp->xd_evtchn = xen_info->console.domU.evtchn;
		} else {
			/* dom0 console has no event channel to bind */
			pdp->xd_evtchn = INVALID_EVTCHN;
			mutex_exit(&pdp->xd_evt_lk);
			return (DDI_SUCCESS);
		}
	} else {
		oeid = pdp->xd_xsdev.otherend_id;
		if (oeid == (domid_t)-1) {
			/* no otherend to bind with */
			mutex_exit(&pdp->xd_evt_lk);
			return (DDI_FAILURE);
		}

		if ((r = xen_bind_interdomain(oeid, evtchn, &pdp->xd_evtchn))) {
			xvdi_dev_error(dip, r, "bind event channel");
			mutex_exit(&pdp->xd_evt_lk);
			return (DDI_FAILURE);
		}
	}
#ifndef XPV_HVM_DRIVER
	/* hook the event channel up to the interrupt framework */
	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
#endif
	mutex_exit(&pdp->xd_evt_lk);

	return (DDI_SUCCESS);
}
447 
448 /*
449  * Allocate an event channel for this device instance.
450  * Currently we only support one evtchn per device instance.
451  */
452 int
453 xvdi_alloc_evtchn(dev_info_t *dip)
454 {
455 	struct xendev_ppd *pdp;
456 	domid_t oeid;
457 	int rv;
458 
459 	pdp = ddi_get_parent_data(dip);
460 	ASSERT(pdp != NULL);
461 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
462 
463 	mutex_enter(&pdp->xd_evt_lk);
464 	if (pdp->xd_devclass == XEN_CONSOLE) {
465 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
466 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
467 		} else {
468 			pdp->xd_evtchn = INVALID_EVTCHN;
469 			mutex_exit(&pdp->xd_evt_lk);
470 			return (DDI_SUCCESS);
471 		}
472 	} else {
473 		oeid = pdp->xd_xsdev.otherend_id;
474 		if (oeid == (domid_t)-1) {
475 			mutex_exit(&pdp->xd_evt_lk);
476 			return (DDI_FAILURE);
477 		}
478 
479 		if ((rv = xen_alloc_unbound_evtchn(oeid, &pdp->xd_evtchn))) {
480 			xvdi_dev_error(dip, rv, "bind event channel");
481 			mutex_exit(&pdp->xd_evt_lk);
482 			return (DDI_FAILURE);
483 		}
484 	}
485 #ifndef XPV_HVM_DRIVER
486 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
487 #endif
488 	mutex_exit(&pdp->xd_evt_lk);
489 
490 	return (DDI_SUCCESS);
491 }
492 
/*
 * Unbind the event channel for this device instance.
 * Currently we only support one evtchn per device instance.
 * Idempotent: a no-op if no channel is currently bound.
 */
void
xvdi_free_evtchn(dev_info_t *dip)
{
	struct xendev_ppd *pdp;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_evt_lk);
	if (pdp->xd_evtchn != INVALID_EVTCHN) {
#ifndef XPV_HVM_DRIVER
		/* detach from the interrupt framework first */
		ec_unbind_irq(pdp->xd_ispec.intrspec_vec);
		pdp->xd_ispec.intrspec_vec = 0;
#endif
		pdp->xd_evtchn = INVALID_EVTCHN;
	}
	mutex_exit(&pdp->xd_evt_lk);
}
515 
516 #ifndef XPV_HVM_DRIVER
517 /*
518  * Map an inter-domain communication ring for a virtual device.
519  * This is used by backend drivers.
520  */
521 int
522 xvdi_map_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
523     grant_ref_t gref, xendev_ring_t **ringpp)
524 {
525 	domid_t oeid;
526 	gnttab_map_grant_ref_t mapop;
527 	gnttab_unmap_grant_ref_t unmapop;
528 	caddr_t ringva;
529 	ddi_acc_hdl_t *ap;
530 	ddi_acc_impl_t *iap;
531 	xendev_ring_t *ring;
532 	int err;
533 	char errstr[] = "mapping in ring buffer";
534 
535 	ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
536 	oeid = xvdi_get_oeid(dip);
537 
538 	/* alloc va in backend dom for ring buffer */
539 	ringva = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
540 	    0, 0, 0, 0, VM_SLEEP);
541 
542 	/* map in ring page */
543 	hat_prepare_mapping(kas.a_hat, ringva, NULL);
544 	mapop.host_addr = (uint64_t)(uintptr_t)ringva;
545 	mapop.flags = GNTMAP_host_map;
546 	mapop.ref = gref;
547 	mapop.dom = oeid;
548 	err = xen_map_gref(GNTTABOP_map_grant_ref, &mapop, 1, B_FALSE);
549 	if (err) {
550 		xvdi_fatal_error(dip, err, errstr);
551 		goto errout1;
552 	}
553 
554 	if (mapop.status != 0) {
555 		xvdi_fatal_error(dip, err, errstr);
556 		goto errout2;
557 	}
558 	ring->xr_vaddr = ringva;
559 	ring->xr_grant_hdl = mapop.handle;
560 	ring->xr_gref = gref;
561 
562 	/*
563 	 * init an acc handle and associate it w/ this ring
564 	 * this is only for backend drivers. we get the memory by calling
565 	 * vmem_xalloc(), instead of calling any ddi function, so we have
566 	 * to init an acc handle by ourselves
567 	 */
568 	ring->xr_acc_hdl = impl_acc_hdl_alloc(KM_SLEEP, NULL);
569 	ap = impl_acc_hdl_get(ring->xr_acc_hdl);
570 	ap->ah_vers = VERS_ACCHDL;
571 	ap->ah_dip = dip;
572 	ap->ah_xfermodes = DDI_DMA_CONSISTENT;
573 	ap->ah_acc = xendev_dc_accattr;
574 	iap = (ddi_acc_impl_t *)ap->ah_platform_private;
575 	iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
576 	impl_acc_hdl_init(ap);
577 	ap->ah_offset = 0;
578 	ap->ah_len = (off_t)PAGESIZE;
579 	ap->ah_addr = ring->xr_vaddr;
580 
581 	/* init backend ring */
582 	xvdi_ring_init_back_ring(ring, nentry, entrysize);
583 
584 	*ringpp = ring;
585 
586 	return (DDI_SUCCESS);
587 
588 errout2:
589 	/* unmap ring page */
590 	unmapop.host_addr = (uint64_t)(uintptr_t)ringva;
591 	unmapop.handle = ring->xr_grant_hdl;
592 	unmapop.dev_bus_addr = 0;
593 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
594 	hat_release_mapping(kas.a_hat, ringva);
595 errout1:
596 	vmem_xfree(heap_arena, ringva, PAGESIZE);
597 	kmem_free(ring, sizeof (xendev_ring_t));
598 	return (DDI_FAILURE);
599 }
600 
/*
 * Unmap a ring for a virtual device.
 * This is used by backend drivers.
 * Reverses xvdi_map_ring(): frees the acc handle, unmaps the grant,
 * releases the mapping and returns the va to heap_arena.
 */
void
xvdi_unmap_ring(xendev_ring_t *ring)
{
	gnttab_unmap_grant_ref_t unmapop;

	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));

	impl_acc_hdl_free(ring->xr_acc_hdl);
	unmapop.host_addr = (uint64_t)(uintptr_t)ring->xr_vaddr;
	unmapop.handle = ring->xr_grant_hdl;
	unmapop.dev_bus_addr = 0;
	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
	hat_release_mapping(kas.a_hat, ring->xr_vaddr);
	vmem_xfree(heap_arena, ring->xr_vaddr, PAGESIZE);
	kmem_free(ring, sizeof (xendev_ring_t));
}
621 #endif /* XPV_HVM_DRIVER */
622 
/*
 * Re-initialise an inter-domain communications ring for the backend domain.
 * ring will be re-initialized after re-grant succeed
 * ring will be freed if fails to re-grant access to backend domain
 * so, don't keep useful data in the ring
 * used only in frontend driver
 */
static void
xvdi_reinit_ring(dev_info_t *dip, grant_ref_t *gref, xendev_ring_t *ringp)
{
	paddr_t rpaddr;
	maddr_t rmaddr;

	ASSERT((ringp != NULL) && (ringp->xr_paddr != 0));
	rpaddr = ringp->xr_paddr;

	/* dom0 addresses are already machine; domU must translate pa->ma */
	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? rpaddr : pa_to_ma(rpaddr);
	gnttab_grant_foreign_access_ref(ringp->xr_gref, xvdi_get_oeid(dip),
	    rmaddr >> PAGESHIFT, 0);
	*gref = ringp->xr_gref;

	/* init frontend ring */
	xvdi_ring_init_sring(ringp);
	xvdi_ring_init_front_ring(ringp, ringp->xr_sring.fr.nr_ents,
	    ringp->xr_entry_size);
}
649 
650 /*
651  * allocate Xen inter-domain communications ring for Xen virtual devices
652  * used only in frontend driver
653  * if *ringpp is not NULL, we'll simply re-init it
654  */
655 int
656 xvdi_alloc_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
657     grant_ref_t *gref, xendev_ring_t **ringpp)
658 {
659 	size_t len;
660 	xendev_ring_t *ring;
661 	ddi_dma_cookie_t dma_cookie;
662 	uint_t ncookies;
663 	grant_ref_t ring_gref;
664 	domid_t oeid;
665 	maddr_t rmaddr;
666 
667 	if (*ringpp) {
668 		xvdi_reinit_ring(dip, gref, *ringpp);
669 		return (DDI_SUCCESS);
670 	}
671 
672 	*ringpp = ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
673 	oeid = xvdi_get_oeid(dip);
674 
675 	/*
676 	 * Allocate page for this ring buffer
677 	 */
678 	if (ddi_dma_alloc_handle(dip, &xendev_dc_dmaattr, DDI_DMA_SLEEP,
679 	    0, &ring->xr_dma_hdl) != DDI_SUCCESS)
680 		goto err;
681 
682 	if (ddi_dma_mem_alloc(ring->xr_dma_hdl, PAGESIZE,
683 	    &xendev_dc_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
684 	    &ring->xr_vaddr, &len, &ring->xr_acc_hdl) != DDI_SUCCESS) {
685 		ddi_dma_free_handle(&ring->xr_dma_hdl);
686 		goto err;
687 	}
688 
689 	if (ddi_dma_addr_bind_handle(ring->xr_dma_hdl, NULL,
690 	    ring->xr_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
691 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED) {
692 		ddi_dma_mem_free(&ring->xr_acc_hdl);
693 		ring->xr_vaddr = NULL;
694 		ddi_dma_free_handle(&ring->xr_dma_hdl);
695 		goto err;
696 	}
697 	ASSERT(ncookies == 1);
698 	ring->xr_paddr = dma_cookie.dmac_laddress;
699 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? ring->xr_paddr :
700 	    pa_to_ma(ring->xr_paddr);
701 
702 	if ((ring_gref = gnttab_grant_foreign_access(oeid,
703 	    rmaddr >> PAGESHIFT, 0)) == (grant_ref_t)-1) {
704 		(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
705 		ddi_dma_mem_free(&ring->xr_acc_hdl);
706 		ring->xr_vaddr = NULL;
707 		ddi_dma_free_handle(&ring->xr_dma_hdl);
708 		goto err;
709 	}
710 	*gref = ring->xr_gref = ring_gref;
711 
712 	/* init frontend ring */
713 	xvdi_ring_init_sring(ring);
714 	xvdi_ring_init_front_ring(ring, nentry, entrysize);
715 
716 	return (DDI_SUCCESS);
717 
718 err:
719 	kmem_free(ring, sizeof (xendev_ring_t));
720 	return (DDI_FAILURE);
721 }
722 
/*
 * Release ring buffers allocated for Xen devices
 * used for frontend driver
 * Reverses xvdi_alloc_ring(): ends the foreign grant before tearing
 * down the DMA resources.
 */
void
xvdi_free_ring(xendev_ring_t *ring)
{
	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));

	(void) gnttab_end_foreign_access_ref(ring->xr_gref, 0);
	(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
	ddi_dma_mem_free(&ring->xr_acc_hdl);
	ddi_dma_free_handle(&ring->xr_dma_hdl);
	kmem_free(ring, sizeof (xendev_ring_t));
}
738 
/*
 * Create a child devinfo node for the given device class under 'parent'
 * and attempt to online (or bind) it.  Returns the new dip, or NULL if
 * no driver node applies or the backing xenstore device is already
 * closing.  Caller must hold the parent busy.
 */
dev_info_t *
xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;
	boolean_t backend;
	i_xd_cfg_t *xdcp;
	char xsnamebuf[TYPICALMAXPATHLEN];
	char *type, *node = NULL, *xsname = NULL;
	unsigned int tlen;
	int ret;

	ASSERT(DEVI_BUSY_OWNED(parent));

	backend = (dom != DOMID_SELF);
	xdcp = i_xvdi_devclass2cfg(devclass);
	ASSERT(xdcp != NULL);

	/* Pick the xenstore path and driver node for fe/be/soft devices. */
	if (vdev != VDEV_NOXS) {
		if (!backend) {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d", xdcp->xs_path_fe, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_fe;
		} else {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_be;
		}
	} else {
		node = xdcp->node_fe;
	}

	/* Must have a driver to use. */
	if (node == NULL)
		return (NULL);

	/*
	 * We need to check the state of this device before we go
	 * further, otherwise we'll end up with a dead loop if
	 * anything goes wrong.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read_driver_state(xsname) >= XenbusStateClosing))
		return (NULL);

	ndi_devi_alloc_sleep(parent, node, DEVI_SID_NODEID, &dip);

	/*
	 * Driver binding uses the compatible property _before_ the
	 * node name, so we set the node name to the 'model' of the
	 * device (i.e. 'xnb' or 'xdb') and, if 'type' is present,
	 * encode both the model and the type in a compatible property
	 * (i.e. 'xnb,netfront' or 'xnb,SUNW_mac').  This allows a
	 * driver binding based on the <model,type> pair _before_ a
	 * binding based on the node name.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read(XBT_NULL, xsname, "type", (void *)&type, &tlen)
	    == 0)) {
		size_t clen;
		char *c[1];

		clen = strlen(node) + strlen(type) + 2;
		c[0] = kmem_alloc(clen, KM_SLEEP);
		(void) snprintf(c[0], clen, "%s,%s", node, type);

		(void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    dip, "compatible", (char **)c, 1);

		kmem_free(c[0], clen);
		kmem_free(type, tlen);
	}

	/* Properties consumed later by xvdi_init_dev()/xvdi_find_dev(). */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "devclass", devclass);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "domain", dom);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);

	if (i_ddi_devi_attached(parent))
		ret = ndi_devi_online(dip, 0);
	else
		ret = ndi_devi_bind_driver(dip, 0);
	if (ret != NDI_SUCCESS)
		(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);

	return (dip);
}
827 
828 /*
829  * xendev_enum_class()
830  */
831 void
832 xendev_enum_class(dev_info_t *parent, xendev_devclass_t devclass)
833 {
834 	boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
835 	boolean_t domU = !dom0;
836 	i_xd_cfg_t *xdcp;
837 
838 	xdcp = i_xvdi_devclass2cfg(devclass);
839 	ASSERT(xdcp != NULL);
840 
841 	if (dom0 && !(xdcp->flags & XD_DOM_ZERO))
842 		return;
843 
844 	if (domU && !(xdcp->flags & XD_DOM_GUEST))
845 		return;
846 
847 	if (xdcp->xsdev == NULL) {
848 		int circ;
849 
850 		/*
851 		 * Don't need to probe this kind of device from the
852 		 * store, just create one if it doesn't exist.
853 		 */
854 
855 		ndi_devi_enter(parent, &circ);
856 		if (xvdi_find_dev(parent, devclass, DOMID_SELF, VDEV_NOXS)
857 		    == NULL)
858 			(void) xvdi_create_dev(parent, devclass,
859 			    DOMID_SELF, VDEV_NOXS);
860 		ndi_devi_exit(parent, circ);
861 	} else {
862 		/*
863 		 * Probe this kind of device from the store, both
864 		 * frontend and backend.
865 		 */
866 		if (xdcp->node_fe != NULL) {
867 			i_xvdi_enum_fe(parent, xdcp);
868 		}
869 		if (xdcp->node_be != NULL) {
870 			i_xvdi_enum_be(parent, xdcp);
871 		}
872 	}
873 }
874 
875 /*
876  * xendev_enum_all()
877  */
878 void
879 xendev_enum_all(dev_info_t *parent, boolean_t store_unavailable)
880 {
881 	int i;
882 	i_xd_cfg_t *xdcp;
883 	boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
884 
885 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
886 		/*
887 		 * Dom0 relies on watchpoints to create non-soft
888 		 * devices - don't attempt to iterate over the store.
889 		 */
890 		if (dom0 && (xdcp->xsdev != NULL))
891 			continue;
892 
893 		/*
894 		 * If the store is not yet available, don't attempt to
895 		 * iterate.
896 		 */
897 		if (store_unavailable && (xdcp->xsdev != NULL))
898 			continue;
899 
900 		xendev_enum_class(parent, xdcp->devclass);
901 	}
902 }
903 
904 xendev_devclass_t
905 xendev_nodename_to_devclass(char *nodename)
906 {
907 	int i;
908 	i_xd_cfg_t *xdcp;
909 
910 	/*
911 	 * This relies on the convention that variants of a base
912 	 * driver share the same prefix and that there are no drivers
913 	 * which share a common prefix with the name of any other base
914 	 * drivers.
915 	 *
916 	 * So for a base driver 'xnb' (which is the name listed in
917 	 * xdci) the variants all begin with the string 'xnb' (in fact
918 	 * they are 'xnbe', 'xnbo' and 'xnbu') and there are no other
919 	 * base drivers which have the prefix 'xnb'.
920 	 */
921 	ASSERT(nodename != NULL);
922 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
923 		if (((xdcp->node_fe != NULL) &&
924 		    (strncmp(nodename, xdcp->node_fe,
925 		    strlen(xdcp->node_fe)) == 0)) ||
926 		    ((xdcp->node_be != NULL) &&
927 		    (strncmp(nodename, xdcp->node_be,
928 		    strlen(xdcp->node_be)) == 0)))
929 
930 			return (xdcp->devclass);
931 	}
932 	return (XEN_INVAL);
933 }
934 
935 int
936 xendev_devclass_ipl(xendev_devclass_t devclass)
937 {
938 	i_xd_cfg_t *xdcp;
939 
940 	xdcp = i_xvdi_devclass2cfg(devclass);
941 	ASSERT(xdcp != NULL);
942 
943 	return (xdcp->xd_ipl);
944 }
945 
/*
 * Determine if a devinfo instance exists of a particular device
 * class, domain and xenstore virtual device number.
 * Returns the matching child of 'parent', or NULL.  Caller must
 * hold the parent busy.
 */
dev_info_t *
xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;

	ASSERT(DEVI_BUSY_OWNED(parent));

	switch (devclass) {
	case XEN_CONSOLE:
	case XEN_XENBUS:
	case XEN_DOMCAPS:
	case XEN_BALLOON:
	case XEN_EVTCHN:
	case XEN_PRIVCMD:
		/* Console and soft devices have no vdev. */
		vdev = VDEV_NOXS;
		break;
	default:
		break;
	}

	for (dip = ddi_get_child(parent); dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {
		int *vdevnump, *domidp, *devclsp, vdevnum;
		uint_t ndomid, nvdevnum, ndevcls;
		xendev_devclass_t devcls;
		domid_t domid;
		struct xendev_ppd *pdp = ddi_get_parent_data(dip);

		if (pdp == NULL) {
			/*
			 * No parent-private data yet; fall back to the
			 * devinfo properties set by xvdi_create_dev().
			 * Children missing any of the three properties
			 * are skipped.
			 */
			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "domain", &domidp, &ndomid) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndomid == 1);
			domid = (domid_t)*domidp;
			ddi_prop_free(domidp);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "vdev", &vdevnump, &nvdevnum) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(nvdevnum == 1);
			vdevnum = *vdevnump;
			ddi_prop_free(vdevnump);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "devclass", &devclsp,
			    &ndevcls) != DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndevcls == 1);
			devcls = (xendev_devclass_t)*devclsp;
			ddi_prop_free(devclsp);
		} else {
			/* Use the cached parent-private data. */
			domid = pdp->xd_domain;
			vdevnum = pdp->xd_vdevnum;
			devcls = pdp->xd_devclass;
		}

		if ((domid == dom) && (vdevnum == vdev) && (devcls == devclass))
			return (dip);
	}
	return (NULL);
}
1015 
1016 int
1017 xvdi_get_evtchn(dev_info_t *xdip)
1018 {
1019 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1020 
1021 	ASSERT(pdp != NULL);
1022 	return (pdp->xd_evtchn);
1023 }
1024 
1025 int
1026 xvdi_get_vdevnum(dev_info_t *xdip)
1027 {
1028 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1029 
1030 	ASSERT(pdp != NULL);
1031 	return (pdp->xd_vdevnum);
1032 }
1033 
1034 char *
1035 xvdi_get_xsname(dev_info_t *xdip)
1036 {
1037 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1038 
1039 	ASSERT(pdp != NULL);
1040 	return ((char *)(pdp->xd_xsdev.nodename));
1041 }
1042 
1043 char *
1044 xvdi_get_oename(dev_info_t *xdip)
1045 {
1046 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1047 
1048 	ASSERT(pdp != NULL);
1049 	if (pdp->xd_devclass == XEN_CONSOLE)
1050 		return (NULL);
1051 	return ((char *)(pdp->xd_xsdev.otherend));
1052 }
1053 
1054 struct xenbus_device *
1055 xvdi_get_xsd(dev_info_t *xdip)
1056 {
1057 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1058 
1059 	ASSERT(pdp != NULL);
1060 	return (&pdp->xd_xsdev);
1061 }
1062 
1063 domid_t
1064 xvdi_get_oeid(dev_info_t *xdip)
1065 {
1066 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1067 
1068 	ASSERT(pdp != NULL);
1069 	if (pdp->xd_devclass == XEN_CONSOLE)
1070 		return ((domid_t)-1);
1071 	return ((domid_t)(pdp->xd_xsdev.otherend_id));
1072 }
1073 
1074 void
1075 xvdi_dev_error(dev_info_t *dip, int errno, char *errstr)
1076 {
1077 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1078 
1079 	ASSERT(pdp != NULL);
1080 	xenbus_dev_error(&pdp->xd_xsdev, errno, errstr);
1081 }
1082 
1083 void
1084 xvdi_fatal_error(dev_info_t *dip, int errno, char *errstr)
1085 {
1086 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1087 
1088 	ASSERT(pdp != NULL);
1089 	xenbus_dev_fatal(&pdp->xd_xsdev, errno, errstr);
1090 }
1091 
/*
 * Taskq handler for other-end state change events.  Frees the event
 * wrapper, discards stale events, then either delivers the new state
 * to the leaf driver via the XS_OE_STATE NDI event or, if no handler
 * is registered, applies a default close policy.  Finally, schedules
 * device offlining once the other end has closed.
 */
static void
i_xvdi_oestate_handler(void *arg)
{
	i_oestate_evt_t *evt = (i_oestate_evt_t *)arg;
	dev_info_t *dip = evt->dip;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	XenbusState oestate = pdp->xd_xsdev.otherend_state;
	XenbusState curr_oestate = evt->state;
	ddi_eventcookie_t evc;

	/* evt is alloc'ed in i_xvdi_oestate_cb */
	kmem_free(evt, sizeof (i_oestate_evt_t));

	/*
	 * If the oestate we're handling is not the latest one,
	 * it does not make any sense to continue handling it.
	 */
	if (curr_oestate != oestate)
		return;

	mutex_enter(&pdp->xd_ndi_lk);

	if (pdp->xd_oe_ehid != NULL) {
		/* send notification to driver */
		if (ddi_get_eventcookie(dip, XS_OE_STATE,
		    &evc) == DDI_SUCCESS) {
			/* drop the lock while calling into the driver */
			mutex_exit(&pdp->xd_ndi_lk);
			(void) ndi_post_event(dip, dip, evc, &oestate);
			mutex_enter(&pdp->xd_ndi_lk);
		}
	} else {
		/*
		 * take default action, if driver hasn't registered its
		 * event handler yet
		 */
		if (oestate == XenbusStateClosing) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
		} else if (oestate == XenbusStateClosed) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
			(void) xvdi_post_event(dip, XEN_HP_REMOVE);
		}
	}

	mutex_exit(&pdp->xd_ndi_lk);

	/*
	 * We'll try to remove the devinfo node of this device if the
	 * other end has closed.
	 */
	if (oestate == XenbusStateClosed)
		(void) ddi_taskq_dispatch(DEVI(ddi_get_parent(dip))->devi_taskq,
		    xendev_offline_device, dip, DDI_SLEEP);
}
1147 
/*
 * Taskq handler for hotplug-status change events.  Reads the
 * "hotplug-status" xenstore node being watched and posts the mapped
 * state (Connected, or Unrecognized for any other value) to the leaf
 * driver via the XS_HP_STATE NDI event.
 */
static void
i_xvdi_hpstate_handler(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_eventcookie_t evc;
	char *hp_status;
	unsigned int hpl;

	mutex_enter(&pdp->xd_ndi_lk);
	if ((ddi_get_eventcookie(dip, XS_HP_STATE, &evc) == DDI_SUCCESS) &&
	    (xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl) == 0)) {

		xendev_hotplug_state_t new_state = Unrecognized;

		if (strcmp(hp_status, "connected") == 0)
			new_state = Connected;

		/* drop the lock before calling back into the driver */
		mutex_exit(&pdp->xd_ndi_lk);

		(void) ndi_post_event(dip, dip, evc, &new_state);
		/* hp_status was allocated by xenbus_read() */
		kmem_free(hp_status, hpl);
		return;
	}
	mutex_exit(&pdp->xd_ndi_lk);
}
1175 
1176 void
1177 xvdi_notify_oe(dev_info_t *dip)
1178 {
1179 	struct xendev_ppd *pdp;
1180 
1181 	pdp = ddi_get_parent_data(dip);
1182 	ASSERT(pdp->xd_evtchn != INVALID_EVTCHN);
1183 	ec_notify_via_evtchn(pdp->xd_evtchn);
1184 }
1185 
1186 static void
1187 i_xvdi_bepath_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1188 {
1189 	dev_info_t *dip = (dev_info_t *)w->dev;
1190 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1191 	char *be = NULL;
1192 	unsigned int bel;
1193 
1194 	ASSERT(len > XS_WATCH_PATH);
1195 	ASSERT(vec[XS_WATCH_PATH] != NULL);
1196 
1197 	/*
1198 	 * If the backend is not the same as that we already stored,
1199 	 * re-set our watch for its' state.
1200 	 */
1201 	if ((xenbus_read(XBT_NULL, "", vec[XS_WATCH_PATH], (void *)be, &bel)
1202 	    == 0) && (strcmp(be, pdp->xd_xsdev.otherend) != 0))
1203 		(void) i_xvdi_add_watch_oestate(dip);
1204 
1205 	if (be != NULL) {
1206 		ASSERT(bel > 0);
1207 		kmem_free(be, bel);
1208 	}
1209 }
1210 
1211 static void
1212 i_xvdi_xb_watch_free(xd_xb_watches_t *xxwp)
1213 {
1214 	ASSERT(xxwp->xxw_ref == 0);
1215 	strfree((char *)xxwp->xxw_watch.node);
1216 	kmem_free(xxwp, sizeof (*xxwp));
1217 }
1218 
1219 static void
1220 i_xvdi_xb_watch_release(xd_xb_watches_t *xxwp)
1221 {
1222 	ASSERT(MUTEX_HELD(&xxwp->xxw_xppd->xd_ndi_lk));
1223 	ASSERT(xxwp->xxw_ref > 0);
1224 	if (--xxwp->xxw_ref == 0)
1225 		i_xvdi_xb_watch_free(xxwp);
1226 }
1227 
1228 static void
1229 i_xvdi_xb_watch_hold(xd_xb_watches_t *xxwp)
1230 {
1231 	ASSERT(MUTEX_HELD(&xxwp->xxw_xppd->xd_ndi_lk));
1232 	ASSERT(xxwp->xxw_ref > 0);
1233 	xxwp->xxw_ref++;
1234 }
1235 
1236 static void
1237 i_xvdi_xb_watch_cb_tq(void *arg)
1238 {
1239 	xd_xb_watches_t		*xxwp = (xd_xb_watches_t *)arg;
1240 	dev_info_t		*dip = (dev_info_t *)xxwp->xxw_watch.dev;
1241 	struct xendev_ppd	*pdp = xxwp->xxw_xppd;
1242 
1243 	xxwp->xxw_cb(dip, xxwp->xxw_watch.node, xxwp->xxw_arg);
1244 
1245 	mutex_enter(&pdp->xd_ndi_lk);
1246 	i_xvdi_xb_watch_release(xxwp);
1247 	mutex_exit(&pdp->xd_ndi_lk);
1248 }
1249 
/*
 * Low-level xenbus watch callback: locate the tracking structure for
 * the firing watch on this device's list, take a hold on it, and hand
 * the real work off to the per-device watch taskq.  Ignores watches
 * no longer on the list (e.g. already torn down).
 */
static void
i_xvdi_xb_watch_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
{
	dev_info_t		*dip = (dev_info_t *)w->dev;
	struct xendev_ppd	*pdp = ddi_get_parent_data(dip);
	xd_xb_watches_t		*xxwp;

	ASSERT(len > XS_WATCH_PATH);
	ASSERT(vec[XS_WATCH_PATH] != NULL);

	mutex_enter(&pdp->xd_ndi_lk);
	/* find the tracking entry that embeds this watch */
	for (xxwp = list_head(&pdp->xd_xb_watches); xxwp != NULL;
	    xxwp = list_next(&pdp->xd_xb_watches, xxwp)) {
		if (w == &xxwp->xxw_watch)
			break;
	}

	if (xxwp == NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		return;
	}

	/* hold is released by i_xvdi_xb_watch_cb_tq() */
	i_xvdi_xb_watch_hold(xxwp);
	(void) ddi_taskq_dispatch(pdp->xd_xb_watch_taskq,
	    i_xvdi_xb_watch_cb_tq, xxwp, DDI_SLEEP);
	mutex_exit(&pdp->xd_ndi_lk);
}
1277 
1278 /*
1279  * Any watches registered with xvdi_add_xb_watch_handler() get torn down during
 * a suspend operation.  So if a frontend driver wants to use these interfaces,
1281  * that driver is responsible for re-registering any watches it had before
1282  * the suspend operation.
1283  */
int
xvdi_add_xb_watch_handler(dev_info_t *dip, const char *dir, const char *node,
    xvdi_xb_watch_cb_t cb, void *arg)
{
	struct xendev_ppd	*pdp = ddi_get_parent_data(dip);
	xd_xb_watches_t		*xxw_new, *xxwp;
	char			*path;
	int			n;

	ASSERT((dip != NULL) && (dir != NULL) && (node != NULL));
	ASSERT(cb != NULL);

	/* the watched xenstore path is "<dir>/<node>" */
	n = strlen(dir) + 1 + strlen(node) + 1;
	path = kmem_zalloc(n, KM_SLEEP);
	(void) strlcat(path, dir, n);
	(void) strlcat(path, "/", n);
	(void) strlcat(path, node, n);
	ASSERT((strlen(path) + 1) == n);

	/* initial reference is dropped on failure or at teardown */
	xxw_new = kmem_zalloc(sizeof (*xxw_new), KM_SLEEP);
	xxw_new->xxw_ref = 1;
	xxw_new->xxw_watch.node = path;
	xxw_new->xxw_watch.callback = i_xvdi_xb_watch_cb;
	/* as elsewhere in this file, the dip rides in the 'dev' field */
	xxw_new->xxw_watch.dev = (struct xenbus_device *)dip;
	xxw_new->xxw_xppd = pdp;
	xxw_new->xxw_cb = cb;
	xxw_new->xxw_arg = arg;

	mutex_enter(&pdp->xd_ndi_lk);

	/*
	 * If this is the first watch we're setting up, create a taskq
	 * to dispatch watch events and initialize the watch list.
	 */
	if (pdp->xd_xb_watch_taskq == NULL) {
		char tq_name[TASKQ_NAMELEN];

		ASSERT(list_is_empty(&pdp->xd_xb_watches));

		(void) snprintf(tq_name, sizeof (tq_name),
		    "%s_xb_watch_tq", ddi_get_name(dip));

		if ((pdp->xd_xb_watch_taskq = ddi_taskq_create(dip, tq_name,
		    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
			i_xvdi_xb_watch_release(xxw_new);
			mutex_exit(&pdp->xd_ndi_lk);
			return (DDI_FAILURE);
		}
	}

	/* Don't allow duplicate watches to be registered */
	for (xxwp = list_head(&pdp->xd_xb_watches); xxwp != NULL;
	    xxwp = list_next(&pdp->xd_xb_watches, xxwp)) {

		/*
		 * NOTE: on DEBUG kernels the ASSERT fires for a
		 * duplicate; non-DEBUG kernels fall through to the
		 * graceful DDI_FAILURE return below.
		 */
		ASSERT(strcmp(xxwp->xxw_watch.node, path) != 0);
		if (strcmp(xxwp->xxw_watch.node, path) != 0)
			continue;
		i_xvdi_xb_watch_release(xxw_new);
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	if (register_xenbus_watch(&xxw_new->xxw_watch) != 0) {
		/* tear the taskq back down if we were the first watch */
		if (list_is_empty(&pdp->xd_xb_watches)) {
			ddi_taskq_destroy(pdp->xd_xb_watch_taskq);
			pdp->xd_xb_watch_taskq = NULL;
		}
		i_xvdi_xb_watch_release(xxw_new);
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	list_insert_head(&pdp->xd_xb_watches, xxw_new);
	mutex_exit(&pdp->xd_ndi_lk);
	return (DDI_SUCCESS);
}
1360 
1361 /*
1362  * Tear down all xenbus watches registered by the specified dip.
1363  */
void
xvdi_remove_xb_watch_handlers(dev_info_t *dip)
{
	struct xendev_ppd	*pdp = ddi_get_parent_data(dip);
	xd_xb_watches_t		*xxwp;
	ddi_taskq_t		*tq;

	mutex_enter(&pdp->xd_ndi_lk);

	while ((xxwp = list_remove_head(&pdp->xd_xb_watches)) != NULL) {
		/* unregister may block; it must run without the lock */
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&xxwp->xxw_watch);
		mutex_enter(&pdp->xd_ndi_lk);
		i_xvdi_xb_watch_release(xxwp);
	}
	ASSERT(list_is_empty(&pdp->xd_xb_watches));

	/*
	 * We can't hold xd_ndi_lk while we destroy the xd_xb_watch_taskq.
	 * This is because if there are currently any executing taskq threads,
	 * we will block until they are finished, and to finish they need
	 * to acquire xd_ndi_lk in i_xvdi_xb_watch_cb_tq() so they can release
	 * their reference on their corresponding xxwp structure.
	 */
	tq = pdp->xd_xb_watch_taskq;
	pdp->xd_xb_watch_taskq = NULL;
	mutex_exit(&pdp->xd_ndi_lk);
	if (tq != NULL)
		ddi_taskq_destroy(tq);
}
1394 
/*
 * Set up monitoring of the other end's XenbusState for this device:
 * create (if needed) the single-threaded taskq used to deliver state
 * events in order, then register our state-change callback and make
 * initial contact with the other end.  Caller holds xd_ndi_lk.
 */
static int
i_xvdi_add_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.nodename != NULL);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/*
	 * Create taskq for delivering other end state change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * xenstore change under the path that it is watching. If two
	 * changes happen consecutively in a very short amount of
	 * time, it is likely that the driver will see only the last
	 * one.
	 */
	if (pdp->xd_oe_taskq == NULL)
		if ((pdp->xd_oe_taskq = ddi_taskq_create(dip,
		    "xendev_oe_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	/*
	 * Watch for changes to the XenbusState of otherend.
	 */
	pdp->xd_xsdev.otherend_state = XenbusStateUnknown;
	pdp->xd_xsdev.otherend_changed = i_xvdi_oestate_cb;

	/* establish the watch; undo our setup if that fails */
	if (talk_to_otherend(&pdp->xd_xsdev) != 0) {
		i_xvdi_rem_watch_oestate(dip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1435 
/*
 * Tear down other-end state monitoring for this device: unregister
 * the watch, drain the event taskq, and reset the cached other-end
 * information.  Caller holds xd_ndi_lk; the lock is dropped around
 * the blocking unregister/destroy calls.
 */
static void
i_xvdi_rem_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	struct xenbus_device *dev;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	dev = &pdp->xd_xsdev;

	/* Unwatch for changes to XenbusState of otherend */
	if (dev->otherend_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&dev->otherend_watch);
		mutex_enter(&pdp->xd_ndi_lk);
	}

	/* make sure no event handler is running */
	if (pdp->xd_oe_taskq != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		ddi_taskq_destroy(pdp->xd_oe_taskq);
		mutex_enter(&pdp->xd_ndi_lk);
		pdp->xd_oe_taskq = NULL;
	}

	/* clean up */
	dev->otherend_state = XenbusStateUnknown;
	dev->otherend_id = (domid_t)-1;
	if (dev->otherend_watch.node != NULL)
		kmem_free((void *)dev->otherend_watch.node,
		    strlen(dev->otherend_watch.node) + 1);
	dev->otherend_watch.node = NULL;
	if (dev->otherend != NULL)
		kmem_free((void *)dev->otherend, strlen(dev->otherend) + 1);
	dev->otherend = NULL;
}
1474 
/*
 * Set up hotplug-status monitoring for a backend device: create (if
 * needed) the single-threaded event taskq, then register a watch on
 * the device's "hotplug-status" xenstore node.  Caller holds
 * xd_ndi_lk.  Backend-only (frontend == 0).
 */
static int
i_xvdi_add_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/*
	 * Create taskq for delivering hotplug status change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * hotplug status change under the path that it is
	 * watching. If two changes happen consecutively in a very
	 * short amount of time, it is likely that the driver only
	 * sees the last one.
	 */
	if (pdp->xd_hp_taskq == NULL)
		if ((pdp->xd_hp_taskq = ddi_taskq_create(dip,
		    "xendev_hp_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	if (pdp->xd_hp_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* watch node is "<nodename>/hotplug-status" */
		len = strlen(pdp->xd_xsdev.nodename) +
		    strlen("/hotplug-status") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/hotplug-status",
		    pdp->xd_xsdev.nodename);

		pdp->xd_hp_watch.node = path;
		pdp->xd_hp_watch.callback = i_xvdi_hpstate_cb;
		pdp->xd_hp_watch.dev = (struct xenbus_device *)dip; /* yuck! */
		if (register_xenbus_watch(&pdp->xd_hp_watch) != 0) {
			i_xvdi_rem_watch_hpstate(dip);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1525 
/*
 * Tear down hotplug-status monitoring for a backend device:
 * unregister the watch, drain the event taskq, and free the watch
 * node string.  Caller holds xd_ndi_lk; the lock is dropped around
 * the blocking unregister/destroy calls.
 */
static void
i_xvdi_rem_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/* Unwatch for changes to "hotplug-status" node for backend device. */
	if (pdp->xd_hp_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&pdp->xd_hp_watch);
		mutex_enter(&pdp->xd_ndi_lk);
	}

	/* Make sure no event handler is running. */
	if (pdp->xd_hp_taskq != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		ddi_taskq_destroy(pdp->xd_hp_taskq);
		mutex_enter(&pdp->xd_ndi_lk);
		pdp->xd_hp_taskq = NULL;
	}

	/* Clean up. */
	if (pdp->xd_hp_watch.node != NULL) {
		kmem_free((void *)pdp->xd_hp_watch.node,
		    strlen(pdp->xd_hp_watch.node) + 1);
		pdp->xd_hp_watch.node = NULL;
	}
}
1558 
/*
 * Establish all standard watches for this device: the other-end
 * state watch for everyone, plus either the backend-path watch
 * (frontends) or the hotplug-status watch (backends).  On any
 * failure all watches set up here are removed again.
 */
static int
i_xvdi_add_watches(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_ndi_lk);

	if (i_xvdi_add_watch_oestate(dip) != DDI_SUCCESS) {
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	if (pdp->xd_xsdev.frontend == 1) {
		/*
		 * Frontend devices must watch for the backend path
		 * changing.
		 */
		if (i_xvdi_add_watch_bepath(dip) != DDI_SUCCESS)
			goto unwatch_and_fail;
	} else {
		/*
		 * Backend devices must watch for hotplug events.
		 */
		if (i_xvdi_add_watch_hpstate(dip) != DDI_SUCCESS)
			goto unwatch_and_fail;
	}

	mutex_exit(&pdp->xd_ndi_lk);

	return (DDI_SUCCESS);

unwatch_and_fail:
	/* undo the oestate watch set up above */
	i_xvdi_rem_watch_oestate(dip);
	mutex_exit(&pdp->xd_ndi_lk);

	return (DDI_FAILURE);
}
1598 
/*
 * Remove every watch associated with this device: the other-end
 * state watch, the frontend/backend-specific watch, and any watches
 * the leaf driver registered via xvdi_add_xb_watch_handler().
 */
static void
i_xvdi_rem_watches(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_ndi_lk);

	i_xvdi_rem_watch_oestate(dip);

	if (pdp->xd_xsdev.frontend == 1)
		i_xvdi_rem_watch_bepath(dip);
	else
		i_xvdi_rem_watch_hpstate(dip);

	mutex_exit(&pdp->xd_ndi_lk);

	/* driver-registered watches take their own locking internally */
	xvdi_remove_xb_watch_handlers(dip);
}
1619 
/*
 * Register a watch on this frontend device's "<nodename>/backend"
 * xenstore node so we notice when the backend path changes.
 * Frontend-only.
 */
static int
i_xvdi_add_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);

	/*
	 * Frontend devices need to watch for the backend path changing.
	 */
	if (pdp->xd_bepath_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		len = strlen(pdp->xd_xsdev.nodename) + strlen("/backend") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/backend",
		    pdp->xd_xsdev.nodename);

		pdp->xd_bepath_watch.node = path;
		pdp->xd_bepath_watch.callback = i_xvdi_bepath_cb;
		pdp->xd_bepath_watch.dev = (struct xenbus_device *)dip;
		if (register_xenbus_watch(&pdp->xd_bepath_watch) != 0) {
			/* registration failed; free the path we built */
			kmem_free(path, len);
			pdp->xd_bepath_watch.node = NULL;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1654 
/*
 * Unregister and free this frontend device's backend-path watch.
 * Caller holds xd_ndi_lk; the lock is dropped around the blocking
 * unregister call.  Frontend-only.
 */
static void
i_xvdi_rem_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	if (pdp->xd_bepath_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&pdp->xd_bepath_watch);
		mutex_enter(&pdp->xd_ndi_lk);

		kmem_free((void *)(pdp->xd_bepath_watch.node),
		    strlen(pdp->xd_bepath_watch.node) + 1);
		pdp->xd_bepath_watch.node = NULL;
	}
}
1674 
1675 int
1676 xvdi_switch_state(dev_info_t *dip, xenbus_transaction_t xbt,
1677     XenbusState newState)
1678 {
1679 	int rv;
1680 	struct xendev_ppd *pdp;
1681 
1682 	pdp = ddi_get_parent_data(dip);
1683 	ASSERT(pdp != NULL);
1684 
1685 	XVDI_DPRINTF(XVDI_DBG_STATE,
1686 	    "xvdi_switch_state: %s@%s's xenbus state moves to %d\n",
1687 	    ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
1688 	    ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
1689 	    newState);
1690 
1691 	rv = xenbus_switch_state(&pdp->xd_xsdev, xbt, newState);
1692 	if (rv > 0)
1693 		cmn_err(CE_WARN, "xvdi_switch_state: change state failed");
1694 
1695 	return (rv);
1696 }
1697 
1698 /*
1699  * Notify hotplug script running in userland
1700  */
/*
 * Log an EC_xendev "add" or "remove" sysevent describing this device
 * (domain, vdev, device class, device name, frontend/backend) so the
 * userland hotplug script can react.  Returns the error of the first
 * failing step, or the ddi_log_sysevent() result.
 */
int
xvdi_post_event(dev_info_t *dip, xendev_hotplug_cmd_t hpc)
{
	struct xendev_ppd *pdp;
	nvlist_t *attr_list = NULL;
	i_xd_cfg_t *xdcp;
	sysevent_id_t eid;
	int err;
	char devname[256]; /* XXPV dme: ? */

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);

	xdcp = i_xvdi_devclass2cfg(pdp->xd_devclass);
	ASSERT(xdcp != NULL);

	/* e.g. "xdf0": driver name plus instance number */
	(void) snprintf(devname, sizeof (devname) - 1, "%s%d",
	    ddi_driver_name(dip),  ddi_get_instance(dip));

	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME, KM_NOSLEEP);
	if (err != DDI_SUCCESS)
		goto failure;

	/* attributes consumed by the hotplug script */
	err = nvlist_add_int32(attr_list, "domain", pdp->xd_domain);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_int32(attr_list, "vdev", pdp->xd_vdevnum);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "devclass", xdcp->xsdev);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "device", devname);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "fob",
	    ((pdp->xd_xsdev.frontend == 1) ? "frontend" : "backend"));
	if (err != DDI_SUCCESS)
		goto failure;

	switch (hpc) {
	case XEN_HP_ADD:
		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
		    "add", attr_list, &eid, DDI_NOSLEEP);
		break;
	case XEN_HP_REMOVE:
		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
		    "remove", attr_list, &eid, DDI_NOSLEEP);
		break;
	default:
		err = DDI_FAILURE;
		goto failure;
	}

failure:
	/* nvlist_free(NULL) is a no-op, so this covers all paths */
	nvlist_free(attr_list);

	return (err);
}
1760 
1761 /* ARGSUSED */
1762 static void
1763 i_xvdi_probe_path_cb(struct xenbus_watch *w, const char **vec,
1764     unsigned int len)
1765 {
1766 	char *path;
1767 
1768 	if (xendev_dip == NULL)
1769 		xendev_dip = ddi_find_devinfo("xpvd", -1, 0);
1770 
1771 	path = i_ddi_strdup((char *)vec[XS_WATCH_PATH], KM_SLEEP);
1772 
1773 	(void) ddi_taskq_dispatch(DEVI(xendev_dip)->devi_taskq,
1774 	    i_xvdi_probe_path_handler, (void *)path, DDI_SLEEP);
1775 }
1776 
1777 static void
1778 i_xvdi_watch_device(char *path)
1779 {
1780 	struct xenbus_watch *w;
1781 
1782 	ASSERT(path != NULL);
1783 
1784 	w = kmem_zalloc(sizeof (*w), KM_SLEEP);
1785 	w->node = path;
1786 	w->callback = &i_xvdi_probe_path_cb;
1787 	w->dev = NULL;
1788 
1789 	if (register_xenbus_watch(w) != 0) {
1790 		cmn_err(CE_WARN, "i_xvdi_watch_device: "
1791 		    "cannot set watch on %s", path);
1792 		kmem_free(w, sizeof (*w));
1793 		return;
1794 	}
1795 }
1796 
1797 void
1798 xvdi_watch_devices(int newstate)
1799 {
1800 	int devclass;
1801 
1802 	/*
1803 	 * Watch for devices being created in the store.
1804 	 */
1805 	if (newstate == XENSTORE_DOWN)
1806 		return;
1807 	for (devclass = 0; devclass < NXDC; devclass++) {
1808 		if (xdci[devclass].xs_path_fe != NULL)
1809 			i_xvdi_watch_device(xdci[devclass].xs_path_fe);
1810 		if (xdci[devclass].xs_path_be != NULL)
1811 			i_xvdi_watch_device(xdci[devclass].xs_path_be);
1812 	}
1813 }
1814 
1815 /*
1816  * Iterate over the store looking for backend devices to create.
1817  */
1818 static void
1819 i_xvdi_enum_be(dev_info_t *parent, i_xd_cfg_t *xdcp)
1820 {
1821 	char **domains;
1822 	unsigned int ndomains;
1823 	int ldomains, i;
1824 
1825 	if ((domains = xenbus_directory(XBT_NULL, xdcp->xs_path_be, "",
1826 	    &ndomains)) == NULL)
1827 		return;
1828 
1829 	for (i = 0, ldomains = 0; i < ndomains; i++) {
1830 		ldomains += strlen(domains[i]) + 1 + sizeof (char *);
1831 
1832 		i_xvdi_enum_worker(parent, xdcp, domains[i]);
1833 	}
1834 	kmem_free(domains, ldomains);
1835 }
1836 
1837 /*
1838  * Iterate over the store looking for frontend devices to create.
1839  */
static void
i_xvdi_enum_fe(dev_info_t *parent, i_xd_cfg_t *xdcp)
{
	/* a NULL domain means "local frontend" to the worker */
	i_xvdi_enum_worker(parent, xdcp, NULL);
}
1845 
/*
 * Enumerate the devices listed under one store directory and create
 * a child devinfo node for each one that does not already exist.
 * A NULL 'domain' scans the local frontend path; otherwise 'domain'
 * names the guest domain directory under the backend path.
 */
static void
i_xvdi_enum_worker(dev_info_t *parent, i_xd_cfg_t *xdcp,
    char *domain)
{
	char *path, *domain_path, *ep;
	char **devices;
	unsigned int ndevices;
	int ldevices, j, circ;
	domid_t dom;
	long tmplong;

	if (domain == NULL) {
		dom = DOMID_SELF;
		path = xdcp->xs_path_fe;
		domain_path = "";
	} else {
		/* directory entry name is the decimal domain id */
		(void) ddi_strtol(domain, &ep, 0, &tmplong);
		dom = tmplong;
		path = xdcp->xs_path_be;
		domain_path = domain;
	}

	if ((devices = xenbus_directory(XBT_NULL, path, domain_path,
	    &ndevices)) == NULL)
		return;

	for (j = 0, ldevices = 0; j < ndevices; j++) {
		int vdev;

		/* accumulate the size of the directory listing to free */
		ldevices += strlen(devices[j]) + 1 + sizeof (char *);
		(void) ddi_strtol(devices[j], &ep, 0, &tmplong);
		vdev = tmplong;

		/* serialize child creation under the parent */
		ndi_devi_enter(parent, &circ);

		if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL)
			(void) xvdi_create_dev(parent, xdcp->devclass,
			    dom, vdev);

		ndi_devi_exit(parent, circ);
	}
	kmem_free(devices, ldevices);
}
1889 
1890 /*
1891  * Leaf drivers should call this in their detach() routine during suspend.
1892  */
void
xvdi_suspend(dev_info_t *dip)
{
	/* drop all xenstore watches; they are re-added on resume */
	i_xvdi_rem_watches(dip);
}
1898 
1899 /*
1900  * Leaf drivers should call this in their attach() routine during resume.
1901  */
int
xvdi_resume(dev_info_t *dip)
{
	/* re-establish the standard watches dropped at suspend time */
	return (i_xvdi_add_watches(dip));
}
1907 
1908 /*
1909  * Add event handler for the leaf driver
1910  * to handle event triggered by the change in xenstore
1911  */
int
xvdi_add_event_handler(dev_info_t *dip, char *name,
    void (*evthandler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg)
{
	ddi_eventcookie_t ecv;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_callback_id_t *cbid;
	boolean_t call_handler;
	i_oestate_evt_t *evt = NULL;
	XenbusState oestate;

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_ndi_lk);

	/* pick the callback-id slot matching the requested event */
	if (strcmp(name, XS_OE_STATE) == 0) {
		ASSERT(pdp->xd_xsdev.otherend != NULL);

		cbid = &pdp->xd_oe_ehid;
	} else if (strcmp(name, XS_HP_STATE) == 0) {
		/* hotplug-state events only exist for backends */
		if (pdp->xd_xsdev.frontend == 1) {
			mutex_exit(&pdp->xd_ndi_lk);
			return (DDI_FAILURE);
		}

		ASSERT(pdp->xd_hp_watch.node != NULL);

		cbid = &pdp->xd_hp_ehid;
	} else {
		/* Unsupported watch. */
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	/*
	 * No event handler provided, take default action to handle
	 * event.
	 */
	if (evthandler == NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_SUCCESS);
	}

	ASSERT(*cbid == NULL);

	if (ddi_get_eventcookie(dip, name, &ecv) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to find %s cookie for %s@%s",
		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}
	if (ddi_add_event_handler(dip, ecv, evthandler, arg, cbid)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to add %s event handler for %s@%s",
		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
		*cbid = NULL;
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	/*
	 * if we're adding an oe state callback, and the ring has already
	 * transitioned out of Unknown, call the handler after we release
	 * the mutex.
	 */
	call_handler = B_FALSE;
	if ((strcmp(name, XS_OE_STATE) == 0) &&
	    (pdp->xd_xsdev.otherend_state != XenbusStateUnknown)) {
		/* snapshot the state to replay while still locked */
		oestate = pdp->xd_xsdev.otherend_state;
		call_handler = B_TRUE;
	}

	mutex_exit(&pdp->xd_ndi_lk);

	if (call_handler) {
		/* evt is freed by i_xvdi_oestate_handler() */
		evt = kmem_alloc(sizeof (i_oestate_evt_t), KM_SLEEP);
		evt->dip = dip;
		evt->state = oestate;
		(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
		    i_xvdi_oestate_handler, (void *)evt, DDI_SLEEP);
	}

	return (DDI_SUCCESS);
}
1997 
1998 /*
1999  * Remove event handler for the leaf driver and unwatch xenstore
2000  * so, driver will not be notified when xenstore entry changed later
2001  */
void
xvdi_remove_event_handler(dev_info_t *dip, char *name)
{
	struct xendev_ppd *pdp;
	boolean_t rem_oe = B_FALSE, rem_hp = B_FALSE;
	ddi_callback_id_t oeid = NULL, hpid = NULL;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);

	/* a NULL name removes both handler kinds */
	if (name == NULL) {
		rem_oe = B_TRUE;
		rem_hp = B_TRUE;
	} else if (strcmp(name, XS_OE_STATE) == 0) {
		rem_oe = B_TRUE;
	} else if (strcmp(name, XS_HP_STATE) == 0) {
		rem_hp = B_TRUE;
	} else {
		cmn_err(CE_WARN, "event %s not supported, cannot remove", name);
		return;
	}

	mutex_enter(&pdp->xd_ndi_lk);

	/* capture and clear the ids under the lock ... */
	if (rem_oe && (pdp->xd_oe_ehid != NULL)) {
		oeid = pdp->xd_oe_ehid;
		pdp->xd_oe_ehid = NULL;
	}

	if (rem_hp && (pdp->xd_hp_ehid != NULL)) {
		hpid = pdp->xd_hp_ehid;
		pdp->xd_hp_ehid = NULL;
	}

	mutex_exit(&pdp->xd_ndi_lk);

	/* ... then remove the handlers without holding it */
	if (oeid != NULL)
		(void) ddi_remove_event_handler(oeid);
	if (hpid != NULL)
		(void) ddi_remove_event_handler(hpid);
}
2043 
2044 
2045 /*
2046  * common ring interfaces
2047  */
2048 
#define	FRONT_RING(_ringp)	(&(_ringp)->xr_sring.fr)
#define	BACK_RING(_ringp)	(&(_ringp)->xr_sring.br)
/*
 * GET_RING_SIZE previously expanded 'ringp' instead of its parameter
 * '_ringp', silently capturing whatever variable named 'ringp' was in
 * scope at the call site.  Use the parameter so the macro is hygienic;
 * all existing callers pass a variable named 'ringp', so the expansion
 * is unchanged for them.
 */
#define	GET_RING_SIZE(_ringp)	RING_SIZE(FRONT_RING(_ringp))
#define	GET_RING_ENTRY_FE(_ringp, _idx)		\
	(FRONT_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
#define	GET_RING_ENTRY_BE(_ringp, _idx)		\
	(BACK_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
2058 
/*
 * Return the number of free slots in the ring: ring size minus the
 * entries this end has produced but the peer has not yet consumed,
 * using the front or back ring view as appropriate.
 */
unsigned int
xvdi_ring_avail_slots(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		frp = FRONT_RING(ringp);
		return (GET_RING_SIZE(ringp) -
		    (frp->req_prod_pvt - frp->rsp_cons));
	} else {
		brp = BACK_RING(ringp);
		return (GET_RING_SIZE(ringp) -
		    (brp->rsp_prod_pvt - brp->req_cons));
	}
}
2075 
/*
 * Backend-only: return nonzero if the frontend has published requests
 * we have not yet consumed.  The second clause guards against the
 * consumer index running a full ring ahead of our response producer.
 */
int
xvdi_ring_has_unconsumed_requests(xendev_ring_t *ringp)
{
	comif_ring_be_t *brp;

	ASSERT(!ringp->xr_frontend);
	brp = BACK_RING(ringp);
	return ((brp->req_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &brp->sring->req_prod)) &&
	    ((brp->req_cons - brp->rsp_prod_pvt) != RING_SIZE(brp)));
}
2087 
/*
 * Frontend-only: return nonzero if we have pushed requests for which
 * the backend has not yet produced responses.
 */
int
xvdi_ring_has_incomp_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->req_prod_pvt !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
2098 
/*
 * Frontend-only: return nonzero if the backend has published
 * responses we have not yet consumed.
 */
int
xvdi_ring_has_unconsumed_responses(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->rsp_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
2109 
2110 /* NOTE: req_event will be increased as needed */
/*
 * Frontend: return the next free request slot (advancing the private
 * producer), or NULL if the ring is full.  Backend: return the next
 * unconsumed request (advancing the consumer), or NULL when none
 * remain; in the latter case req_event is raised and the ring is
 * re-checked to close the race with a concurrent producer.
 */
void *
xvdi_ring_get_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		if (!RING_FULL(frp))
			return (GET_RING_ENTRY_FE(ringp, frp->req_prod_pvt++));
		else
			return (NULL);
	} else {
		/* for backend ring */
		brp = BACK_RING(ringp);
		/* RING_FINAL_CHECK_FOR_REQUESTS() */
		if (xvdi_ring_has_unconsumed_requests(ringp))
			return (GET_RING_ENTRY_BE(ringp, brp->req_cons++));
		else {
			/* ask to be notified at the next request ... */
			ddi_put32(ringp->xr_acc_hdl, &brp->sring->req_event,
			    brp->req_cons + 1);
			membar_enter();
			/* ... then look again in case one just arrived */
			if (xvdi_ring_has_unconsumed_requests(ringp))
				return (GET_RING_ENTRY_BE(ringp,
				    brp->req_cons++));
			else
				return (NULL);
		}
	}
}
2142 
/*
 * Frontend-only: publish privately-produced requests to the shared
 * ring.  Returns nonzero when the newly published range crosses the
 * backend's req_event mark, i.e. when the caller should notify the
 * backend (the standard Xen ring notify check).
 */
int
xvdi_ring_push_request(xendev_ring_t *ringp)
{
	RING_IDX old, new, reqevt;
	comif_ring_fe_t *frp;

	/* only frontend should be able to push request */
	ASSERT(ringp->xr_frontend);

	/* RING_PUSH_REQUEST_AND_CHECK_NOTIFY() */
	frp = FRONT_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_prod);
	new = frp->req_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &frp->sring->req_prod, new);
	/* ensure the peer sees req_prod before we read req_event */
	membar_enter();
	reqevt = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_event);
	return ((RING_IDX)(new - reqevt) < (RING_IDX)(new - old));
}
2161 
/*
 * Return a pointer to the next response slot on the ring, or NULL if a
 * frontend finds no unconsumed response.
 *
 * Backend: hands out the slot at rsp_prod_pvt and advances it (never
 * returns NULL); the response only becomes visible to the frontend on a
 * later xvdi_ring_push_response().
 *
 * Frontend: consumes the response at rsp_cons.  When none is pending,
 * rsp_event is bumped so the backend will send an event for the next
 * response, and the ring is re-checked after a barrier to close the
 * race against a response that arrived in between (the
 * RING_FINAL_CHECK_FOR_RESPONSES() pattern from Xen's ring.h).
 *
 * NOTE: rsp_event will be increased as needed
 */
void *
xvdi_ring_get_response(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (!ringp->xr_frontend) {
		/* for backend ring */
		brp = BACK_RING(ringp);
		return (GET_RING_ENTRY_BE(ringp, brp->rsp_prod_pvt++));
	} else {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		/* RING_FINAL_CHECK_FOR_RESPONSES() */
		if (xvdi_ring_has_unconsumed_responses(ringp))
			return (GET_RING_ENTRY_FE(ringp, frp->rsp_cons++));
		else {
			/*
			 * Nothing pending: ask the backend to notify us
			 * of the next response, then re-check in case one
			 * was produced before rsp_event was updated.
			 */
			ddi_put32(ringp->xr_acc_hdl, &frp->sring->rsp_event,
			    frp->rsp_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_responses(ringp))
				return (GET_RING_ENTRY_FE(ringp,
				    frp->rsp_cons++));
			else
				return (NULL);
		}
	}
}
2191 
/*
 * Publish the backend's queued responses to the frontend and report
 * whether the frontend needs an event notification.
 *
 * Mirrors Xen's RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(): publish the new
 * rsp_prod, then (after a store barrier) sample the frontend's
 * rsp_event.  Using wrap-safe unsigned arithmetic, the return value is
 * non-zero iff rsp_event falls within (old, new] -- i.e. the frontend
 * asked to be woken for one of the responses just pushed.
 */
int
xvdi_ring_push_response(xendev_ring_t *ringp)
{
	RING_IDX old, new, rspevt;
	comif_ring_be_t *brp;

	/* only backend should be able to push response */
	ASSERT(!ringp->xr_frontend);

	/* RING_PUSH_RESPONSE_AND_CHECK_NOTIFY() */
	brp = BACK_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_prod);
	new = brp->rsp_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &brp->sring->rsp_prod, new);
	/* rsp_prod must be visible before rsp_event is sampled */
	membar_enter();
	rspevt = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_event);
	return ((RING_IDX)(new - rspevt) < (RING_IDX)(new - old));
}
2210 
2211 static void
2212 xvdi_ring_init_sring(xendev_ring_t *ringp)
2213 {
2214 	ddi_acc_handle_t acchdl;
2215 	comif_sring_t *xsrp;
2216 	int i;
2217 
2218 	xsrp = (comif_sring_t *)ringp->xr_vaddr;
2219 	acchdl = ringp->xr_acc_hdl;
2220 
2221 	/* shared ring initialization */
2222 	ddi_put32(acchdl, &xsrp->req_prod, 0);
2223 	ddi_put32(acchdl, &xsrp->rsp_prod, 0);
2224 	ddi_put32(acchdl, &xsrp->req_event, 1);
2225 	ddi_put32(acchdl, &xsrp->rsp_event, 1);
2226 	for (i = 0; i < sizeof (xsrp->pad); i++)
2227 		ddi_put8(acchdl, xsrp->pad + i, 0);
2228 }
2229 
2230 static void
2231 xvdi_ring_init_front_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
2232 {
2233 	comif_ring_fe_t *xfrp;
2234 
2235 	xfrp = &ringp->xr_sring.fr;
2236 	xfrp->req_prod_pvt = 0;
2237 	xfrp->rsp_cons = 0;
2238 	xfrp->nr_ents = nentry;
2239 	xfrp->sring = (comif_sring_t *)ringp->xr_vaddr;
2240 
2241 	ringp->xr_frontend = 1;
2242 	ringp->xr_entry_size = entrysize;
2243 }
2244 
2245 #ifndef XPV_HVM_DRIVER
2246 static void
2247 xvdi_ring_init_back_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
2248 {
2249 	comif_ring_be_t *xbrp;
2250 
2251 	xbrp = &ringp->xr_sring.br;
2252 	xbrp->rsp_prod_pvt = 0;
2253 	xbrp->req_cons = 0;
2254 	xbrp->nr_ents = nentry;
2255 	xbrp->sring = (comif_sring_t *)ringp->xr_vaddr;
2256 
2257 	ringp->xr_frontend = 0;
2258 	ringp->xr_entry_size = entrysize;
2259 }
2260 #endif /* XPV_HVM_DRIVER */
2261 
2262 static void
2263 xendev_offline_device(void *arg)
2264 {
2265 	dev_info_t *dip = (dev_info_t *)arg;
2266 	char devname[MAXNAMELEN] = {0};
2267 
2268 	/*
2269 	 * This is currently the only chance to delete a devinfo node, which
2270 	 * is _not_ always successful.
2271 	 */
2272 	(void) ddi_deviname(dip, devname);
2273 	(void) devfs_clean(ddi_get_parent(dip), devname + 1, DV_CLEAN_FORCE);
2274 	(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
2275 }
2276 
2277 static void
2278 i_xvdi_oestate_cb(struct xenbus_device *dev, XenbusState oestate)
2279 {
2280 	dev_info_t *dip = (dev_info_t *)dev->data;
2281 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
2282 	i_oestate_evt_t *evt = NULL;
2283 	boolean_t call_handler;
2284 
2285 	XVDI_DPRINTF(XVDI_DBG_STATE,
2286 	    "i_xvdi_oestate_cb: %s@%s sees oestate change to %d\n",
2287 	    ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
2288 	    ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
2289 	    oestate);
2290 
2291 	/* only call the handler if our state has changed */
2292 	call_handler = B_FALSE;
2293 	mutex_enter(&pdp->xd_ndi_lk);
2294 	if (dev->otherend_state != oestate) {
2295 		dev->otherend_state = oestate;
2296 		call_handler = B_TRUE;
2297 	}
2298 	mutex_exit(&pdp->xd_ndi_lk);
2299 
2300 	if (call_handler) {
2301 		/*
2302 		 * Try to deliver the oestate change event to the dip
2303 		 */
2304 		evt = kmem_alloc(sizeof (i_oestate_evt_t), KM_SLEEP);
2305 		evt->dip = dip;
2306 		evt->state = oestate;
2307 		(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
2308 		    i_xvdi_oestate_handler, (void *)evt, DDI_SLEEP);
2309 	}
2310 }
2311 
/*
 * xenbus watch callback fired when a device's hotplug-status node
 * changes.  Defers all real work to the per-device hotplug taskq; in
 * DEBUG kernels it also reads and logs the new status string first.
 */
/*ARGSUSED*/
static void
i_xvdi_hpstate_cb(struct xenbus_watch *w, const char **vec,
    unsigned int len)
{
	dev_info_t *dip = (dev_info_t *)w->dev;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

#ifdef DEBUG
	/* xenbus_read() allocates hp_status; freed below via kmem_free */
	char *hp_status = NULL;
	unsigned int hpl = 0;

	(void) xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl);
	XVDI_DPRINTF(XVDI_DBG_STATE,
	    "i_xvdi_hpstate_cb: %s@%s sees hpstate change to %s\n",
	    ddi_binding_name(dip) == NULL ?  "null" : ddi_binding_name(dip),
	    ddi_get_name_addr(dip) == NULL ?  "null" : ddi_get_name_addr(dip),
	    hp_status == NULL ? "null" : hp_status);
	if (hp_status != NULL)
		kmem_free(hp_status, hpl);
#endif /* DEBUG */

	/* asynchronous: the handler runs with only the dip as context */
	(void) ddi_taskq_dispatch(pdp->xd_hp_taskq,
	    i_xvdi_hpstate_handler, (void *)dip, DDI_SLEEP);
}
2338 
/*
 * Taskq handler for a newly appearing xenstore device path.  Matches
 * the path against the known frontend/backend xenstore prefixes in
 * xdci[], extracts the domain id and virtual device number from the
 * remainder, and creates the corresponding child devinfo node under
 * xendev_dip if one does not already exist.
 *
 * 'arg' is an allocated path string; this handler owns it and frees it
 * (strlen + 1 bytes) before returning.
 */
static void
i_xvdi_probe_path_handler(void *arg)
{
	dev_info_t *parent;
	char *path = arg, *p = NULL;
	int i, vdev, circ;
	i_xd_cfg_t *xdcp;
	boolean_t frontend;
	domid_t dom;

	/*
	 * Find which device class this path belongs to; on a match, 'p'
	 * points just past the matched prefix and 'xdcp' identifies the
	 * class.
	 */
	for (i = 0, xdcp = &xdci[0]; i < NXDC; i++, xdcp++) {

		if ((xdcp->xs_path_fe != NULL) &&
		    (strncmp(path, xdcp->xs_path_fe, strlen(xdcp->xs_path_fe))
		    == 0)) {

			frontend = B_TRUE;
			p = path + strlen(xdcp->xs_path_fe);
			break;
		}

		if ((xdcp->xs_path_be != NULL) &&
		    (strncmp(path, xdcp->xs_path_be, strlen(xdcp->xs_path_be))
		    == 0)) {

			frontend = B_FALSE;
			p = path + strlen(xdcp->xs_path_be);
			break;
		}

	}

	if (p == NULL) {
		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
		    "unexpected path prefix in %s", path);
		goto done;
	}

	if (frontend) {
		/* frontend paths carry no domain id; expect "/<vdev>/" */
		dom = DOMID_SELF;
		if (sscanf(p, "/%d/", &vdev) != 1) {
			XVDI_DPRINTF(XVDI_DBG_PROBE,
			    "i_xvdi_probe_path_handler: "
			    "cannot parse frontend path %s",
			    path);
			goto done;
		}
	} else {
		/* backend paths encode the peer domain: "/<dom>/<vdev>/" */
		if (sscanf(p, "/%hu/%d/", &dom, &vdev) != 2) {
			XVDI_DPRINTF(XVDI_DBG_PROBE,
			    "i_xvdi_probe_path_handler: "
			    "cannot parse backend path %s",
			    path);
			goto done;
		}
	}

	/*
	 * This is an oxymoron, so indicates a bogus configuration we
	 * must check for.
	 */
	if (vdev == VDEV_NOXS) {
		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
		    "invalid path %s", path);
		goto done;
	}

	parent = xendev_dip;
	ASSERT(parent != NULL);

	/* hold the parent busy while searching/creating children */
	ndi_devi_enter(parent, &circ);

	if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL) {
		XVDI_DPRINTF(XVDI_DBG_PROBE,
		    "i_xvdi_probe_path_handler: create for %s", path);
		(void) xvdi_create_dev(parent, xdcp->devclass, dom, vdev);
	} else {
		XVDI_DPRINTF(XVDI_DBG_PROBE,
		    "i_xvdi_probe_path_handler: %s already exists", path);
	}

	ndi_devi_exit(parent, circ);

done:
	kmem_free(path, strlen(path) + 1);
}
2425