xref: /illumos-gate/usr/src/uts/common/xen/os/xvdi.c (revision 843e1988)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Xen virtual device driver interfaces
31  */
32 
33 /*
34  * todo:
35  * + name space clean up:
36  *	xvdi_* - public xen interfaces, for use by all leaf drivers
37  *	xd_* - public xen data structures
38  *	i_xvdi_* - implementation private functions
39  *	xendev_* - xendev driver interfaces, both internal and in cb_ops/bus_ops
40  * + add mdb dcmds to dump ring status
41  * + implement xvdi_xxx to wrap xenbus_xxx read/write function
42  * + convert (xendev_ring_t *) into xvdi_ring_handle_t
43  */
44 #include <sys/conf.h>
45 #include <sys/param.h>
46 #include <sys/hypervisor.h>
47 #include <sys/xen_mmu.h>
48 #include <sys/kmem.h>
49 #include <vm/seg_kmem.h>
50 #include <sys/debug.h>
51 #include <sys/modctl.h>
52 #include <sys/autoconf.h>
53 #include <sys/ddi_impldefs.h>
54 #include <sys/ddi_subrdefs.h>
55 #include <sys/ddi.h>
56 #include <sys/sunddi.h>
57 #include <sys/sunndi.h>
58 #include <sys/sunldi.h>
59 #include <sys/fs/dv_node.h>
60 #include <sys/evtchn_impl.h>
61 #include <sys/gnttab.h>
62 #include <sys/avintr.h>
63 #include <sys/psm.h>
64 #include <sys/spl.h>
65 #include <sys/promif.h>
66 #include <sys/list.h>
67 #include <sys/bootconf.h>
68 #include <sys/bootsvcs.h>
69 #include <sys/bootinfo.h>
70 #include <sys/note.h>
71 #include <sys/xen_mmu.h>
72 #include <xen/sys/xenbus_impl.h>
73 #include <xen/sys/xendev.h>
74 #include <vm/hat_i86.h>
75 #include <sys/scsi/generic/inquiry.h>
76 #include <util/sscanf.h>
77 #include <xen/public/io/xs_wire.h>
78 
79 
80 static void xvdi_ring_init_sring(xendev_ring_t *);
81 static void xvdi_ring_init_front_ring(xendev_ring_t *, size_t, size_t);
82 static void xvdi_ring_init_back_ring(xendev_ring_t *, size_t, size_t);
83 static void xvdi_reinit_ring(dev_info_t *, grant_ref_t *, xendev_ring_t *);
84 
85 static int i_xvdi_add_watches(dev_info_t *);
86 static void i_xvdi_rem_watches(dev_info_t *);
87 
88 static int i_xvdi_add_watch_oestate(dev_info_t *);
89 static void i_xvdi_rem_watch_oestate(dev_info_t *);
90 static void i_xvdi_oestate_cb(struct xenbus_device *, XenbusState);
91 static void i_xvdi_oestate_handler(void *);
92 
93 static int i_xvdi_add_watch_hpstate(dev_info_t *);
94 static void i_xvdi_rem_watch_hpstate(dev_info_t *);
95 static void i_xvdi_hpstate_cb(struct xenbus_watch *, const char **,
96     unsigned int);
97 static void i_xvdi_hpstate_handler(void *);
98 
99 static int i_xvdi_add_watch_bepath(dev_info_t *);
100 static void i_xvdi_rem_watch_bepath(dev_info_t *);
101 static void i_xvdi_bepath_cb(struct xenbus_watch *, const char **,
102     unsigned in);
103 
104 static void xendev_offline_device(void *);
105 
106 static void i_xvdi_probe_path_cb(struct xenbus_watch *, const char **,
107     unsigned int);
108 static void i_xvdi_probe_path_handler(void *);
109 
/*
 * Per-class configuration record: ties a xendev device class to its
 * xenstore device name/paths (if it has a xenstore presence), its
 * frontend/backend driver node names, the "device_type" property
 * value, the interrupt priority for the class and the kinds of
 * domain it applies to.
 */
typedef struct xd_cfg {
	xendev_devclass_t devclass;	/* class this entry describes */
	char *xsdev;		/* xenstore device name; NULL if no presence */
	char *xs_path_fe;	/* xenstore path of the frontend */
	char *xs_path_be;	/* xenstore path of the backend */
	char *node_fe;		/* frontend driver node name */
	char *node_be;		/* backend driver node name */
	char *device_type;	/* value for the "device_type" property */
	int xd_ipl;		/* interrupt priority level for the class */
	int flags;		/* XD_DOM_* applicability flags, below */
} i_xd_cfg_t;

#define	XD_DOM_ZERO	0x01	/* dom0 only. */
#define	XD_DOM_GUEST	0x02	/* Guest domains (i.e. non-dom0). */
#define	XD_DOM_IO	0x04	/* IO domains. */

/* Present in both dom0 and guest domains (but not IO domains). */
#define	XD_DOM_ALL	(XD_DOM_ZERO | XD_DOM_GUEST)
127 
/*
 * Table of all known device classes.  Entries with a NULL 'xsdev'
 * have no xenstore presence and are created directly; the others are
 * enumerated from the store (see xendev_enum_class()).
 */
static i_xd_cfg_t xdci[] = {
	{ XEN_CONSOLE, NULL, NULL, NULL, "xencons", NULL,
	    "console", IPL_CONS, XD_DOM_ALL, },

	{ XEN_VNET, "vif", "device/vif", "backend/vif", "xnf", "xnb",
	    "network", IPL_VIF, XD_DOM_ALL, },

	{ XEN_VBLK, "vbd", "device/vbd", "backend/vbd", "xdf", "xdb",
	    "block", IPL_VBD, XD_DOM_ALL, },

	{ XEN_XENBUS, NULL, NULL, NULL, "xenbus", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_DOMCAPS, NULL, NULL, NULL, "domcaps", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_BALLOON, NULL, NULL, NULL, "balloon", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_EVTCHN, NULL, NULL, NULL, "evtchn", NULL,
	    NULL, 0, XD_DOM_ZERO, },

	{ XEN_PRIVCMD, NULL, NULL, NULL, "privcmd", NULL,
	    NULL, 0, XD_DOM_ZERO, },
};
/* Number of entries in xdci[]. */
#define	NXDC	(sizeof (xdci) / sizeof (xdci[0]))
154 
155 static void i_xvdi_enum_fe(dev_info_t *, i_xd_cfg_t *);
156 static void i_xvdi_enum_be(dev_info_t *, i_xd_cfg_t *);
157 static void i_xvdi_enum_worker(dev_info_t *, i_xd_cfg_t *, char *);
158 
159 /*
160  * Xen device channel device access and DMA attributes
161  */
162 static ddi_device_acc_attr_t xendev_dc_accattr = {
163 	DDI_DEVICE_ATTR_V0, DDI_NEVERSWAP_ACC, DDI_STRICTORDER_ACC
164 };
165 
166 static ddi_dma_attr_t xendev_dc_dmaattr = {
167 	DMA_ATTR_V0,		/* version of this structure */
168 	0,			/* lowest usable address */
169 	0xffffffffffffffffULL,	/* highest usable address */
170 	0x7fffffff,		/* maximum DMAable byte count */
171 	MMU_PAGESIZE,		/* alignment in bytes */
172 	0x7ff,			/* bitmap of burst sizes */
173 	1,			/* minimum transfer */
174 	0xffffffffU,		/* maximum transfer */
175 	0xffffffffffffffffULL,	/* maximum segment length */
176 	1,			/* maximum number of segments */
177 	1,			/* granularity */
178 	0,			/* flags (reserved) */
179 };
180 
181 static dev_info_t *xendev_dip = NULL;
182 
183 #define	XVDI_DBG_STATE	0x01
184 #define	XVDI_DBG_PROBE	0x02
185 
186 #ifdef DEBUG
187 static int i_xvdi_debug = 0;
188 
189 #define	XVDI_DPRINTF(flag, format, ...)			\
190 {							\
191 	if (i_xvdi_debug & (flag))			\
192 		prom_printf((format), __VA_ARGS__);	\
193 }
194 #else
195 #define	XVDI_DPRINTF(flag, format, ...)
196 #endif /* DEBUG */
197 
198 static i_xd_cfg_t *
199 i_xvdi_devclass2cfg(xendev_devclass_t devclass)
200 {
201 	i_xd_cfg_t *xdcp;
202 	int i;
203 
204 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++)
205 		if (xdcp->devclass == devclass)
206 			return (xdcp);
207 
208 	return (NULL);
209 }
210 
211 int
212 xvdi_init_dev(dev_info_t *dip)
213 {
214 	xendev_devclass_t devcls;
215 	int vdevnum;
216 	domid_t domid;
217 	struct xendev_ppd *pdp;
218 	i_xd_cfg_t *xdcp;
219 	boolean_t backend;
220 	char xsnamebuf[TYPICALMAXPATHLEN];
221 	char *xsname;
222 
223 	devcls = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
224 	    DDI_PROP_DONTPASS, "devclass", XEN_INVAL);
225 	vdevnum = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
226 	    DDI_PROP_DONTPASS, "vdev", -1);
227 	domid = (domid_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
228 	    DDI_PROP_DONTPASS, "domain", DOMID_SELF);
229 
230 	backend = (domid != DOMID_SELF);
231 	xdcp = i_xvdi_devclass2cfg(devcls);
232 	if (xdcp->device_type != NULL)
233 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
234 		    "device_type", xdcp->device_type);
235 
236 	pdp = kmem_zalloc(sizeof (*pdp), KM_SLEEP);
237 	pdp->xd_domain = domid;
238 	pdp->xd_vdevnum = vdevnum;
239 	pdp->xd_devclass = devcls;
240 	pdp->xd_evtchn = INVALID_EVTCHN;
241 	mutex_init(&pdp->xd_lk, NULL, MUTEX_DRIVER, NULL);
242 	ddi_set_parent_data(dip, pdp);
243 
244 	/*
245 	 * devices that do not need to interact with xenstore
246 	 */
247 	if (vdevnum == -1) {
248 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
249 		    "unit-address", "0");
250 		if (devcls == XEN_CONSOLE)
251 			(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
252 			    "pm-hardware-state", "needs-suspend-resume");
253 		return (DDI_SUCCESS);
254 	}
255 
256 	/*
257 	 * PV devices that need to probe xenstore
258 	 */
259 
260 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
261 	    "pm-hardware-state", "needs-suspend-resume");
262 
263 	xsname = xsnamebuf;
264 	if (!backend)
265 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
266 		    "%s/%d", xdcp->xs_path_fe, vdevnum);
267 	else
268 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
269 		    "%s/%d/%d", xdcp->xs_path_be, domid, vdevnum);
270 
271 	pdp->xd_xsdev.nodename = i_ddi_strdup(xsname, KM_SLEEP);
272 	pdp->xd_xsdev.devicetype = xdcp->xsdev;
273 	pdp->xd_xsdev.frontend = (backend ? 0 : 1);
274 	pdp->xd_xsdev.data = dip;
275 	pdp->xd_xsdev.otherend_id = (backend ? domid : -1);
276 	if (i_xvdi_add_watches(dip) != DDI_SUCCESS) {
277 		cmn_err(CE_WARN, "xvdi_init_dev: "
278 		    "cannot add watches for %s", xsname);
279 		xvdi_uninit_dev(dip);
280 		return (DDI_FAILURE);
281 	}
282 
283 	/*
284 	 * frontend device will use "unit-addr" as
285 	 * the bus address, which will be set here
286 	 */
287 	if (!backend) {
288 		void *prop_str;
289 		unsigned int prop_len, addr;
290 
291 		switch (devcls) {
292 		case XEN_VNET:
293 			if (xenbus_read(XBT_NULL, xsname, "mac", &prop_str,
294 			    &prop_len) == 0) {
295 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
296 				    dip, "mac", prop_str);
297 				kmem_free(prop_str, prop_len);
298 			}
299 			prop_str = NULL;
300 			if (xenbus_scanf(XBT_NULL, xsname, "handle", "%u",
301 			    &addr) == 0) {
302 				char unitaddr[9]; /* hold 32-bit hex */
303 
304 				(void) snprintf(unitaddr, 9, "%x", addr);
305 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
306 				    dip, "unit-address", unitaddr);
307 			}
308 			break;
309 		case XEN_VBLK:
310 			if (xenbus_read(XBT_NULL, pdp->xd_xsdev.otherend,
311 			    "dev", &prop_str, &prop_len) == 0) {
312 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
313 				    dip, "unit-address", prop_str);
314 				kmem_free(prop_str, prop_len);
315 			}
316 			break;
317 		default:
318 			break;
319 		}
320 	}
321 
322 	return (DDI_SUCCESS);
323 }
324 
325 void
326 xvdi_uninit_dev(dev_info_t *dip)
327 {
328 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
329 
330 	if (pdp != NULL) {
331 		/* Remove any registered callbacks. */
332 		xvdi_remove_event_handler(dip, NULL);
333 
334 		/* Remove any registered watches. */
335 		i_xvdi_rem_watches(dip);
336 
337 		if (pdp->xd_xsdev.nodename != NULL)
338 			kmem_free((char *)(pdp->xd_xsdev.nodename),
339 			    strlen(pdp->xd_xsdev.nodename) + 1);
340 
341 		ddi_set_parent_data(dip, NULL);
342 
343 		mutex_destroy(&pdp->xd_lk);
344 		kmem_free(pdp, sizeof (*pdp));
345 	}
346 }
347 
348 /*
349  * Bind the event channel for this device instance.
350  * Currently we only support one evtchn per device instance.
351  */
352 int
353 xvdi_bind_evtchn(dev_info_t *dip, evtchn_port_t evtchn)
354 {
355 	struct xendev_ppd *pdp;
356 	domid_t oeid;
357 	int r;
358 
359 	pdp = ddi_get_parent_data(dip);
360 	ASSERT(pdp != NULL);
361 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
362 
363 	mutex_enter(&pdp->xd_lk);
364 	if (pdp->xd_devclass == XEN_CONSOLE) {
365 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
366 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
367 		} else {
368 			pdp->xd_evtchn = INVALID_EVTCHN;
369 			mutex_exit(&pdp->xd_lk);
370 			return (DDI_SUCCESS);
371 		}
372 	} else {
373 		oeid = pdp->xd_xsdev.otherend_id;
374 		if (oeid == (domid_t)-1) {
375 			mutex_exit(&pdp->xd_lk);
376 			return (DDI_FAILURE);
377 		}
378 
379 		if ((r = xen_bind_interdomain(oeid, evtchn, &pdp->xd_evtchn))) {
380 			xvdi_dev_error(dip, r, "bind event channel");
381 			mutex_exit(&pdp->xd_lk);
382 			return (DDI_FAILURE);
383 		}
384 	}
385 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
386 	mutex_exit(&pdp->xd_lk);
387 
388 	return (DDI_SUCCESS);
389 }
390 
391 /*
392  * Allocate an event channel for this device instance.
393  * Currently we only support one evtchn per device instance.
394  */
395 int
396 xvdi_alloc_evtchn(dev_info_t *dip)
397 {
398 	struct xendev_ppd *pdp;
399 	domid_t oeid;
400 	int rv;
401 
402 	pdp = ddi_get_parent_data(dip);
403 	ASSERT(pdp != NULL);
404 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
405 
406 	mutex_enter(&pdp->xd_lk);
407 	if (pdp->xd_devclass == XEN_CONSOLE) {
408 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
409 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
410 		} else {
411 			pdp->xd_evtchn = INVALID_EVTCHN;
412 			mutex_exit(&pdp->xd_lk);
413 			return (DDI_SUCCESS);
414 		}
415 	} else {
416 		oeid = pdp->xd_xsdev.otherend_id;
417 		if (oeid == (domid_t)-1) {
418 			mutex_exit(&pdp->xd_lk);
419 			return (DDI_FAILURE);
420 		}
421 
422 		if ((rv = xen_alloc_unbound_evtchn(oeid, &pdp->xd_evtchn))) {
423 			xvdi_dev_error(dip, rv, "bind event channel");
424 			mutex_exit(&pdp->xd_lk);
425 			return (DDI_FAILURE);
426 		}
427 	}
428 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
429 	mutex_exit(&pdp->xd_lk);
430 
431 	return (DDI_SUCCESS);
432 }
433 
434 /*
435  * Unbind the event channel for this device instance.
436  * Currently we only support one evtchn per device instance.
437  */
438 void
439 xvdi_free_evtchn(dev_info_t *dip)
440 {
441 	struct xendev_ppd *pdp;
442 
443 	pdp = ddi_get_parent_data(dip);
444 	ASSERT(pdp != NULL);
445 
446 	mutex_enter(&pdp->xd_lk);
447 	if (pdp->xd_evtchn != INVALID_EVTCHN) {
448 		ec_unbind_irq(pdp->xd_ispec.intrspec_vec);
449 		pdp->xd_evtchn = INVALID_EVTCHN;
450 		pdp->xd_ispec.intrspec_vec = 0;
451 	}
452 	mutex_exit(&pdp->xd_lk);
453 }
454 
455 /*
456  * Map an inter-domain communication ring for a virtual device.
457  * This is used by backend drivers.
458  */
459 int
460 xvdi_map_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
461     grant_ref_t gref, xendev_ring_t **ringpp)
462 {
463 	domid_t oeid;
464 	gnttab_map_grant_ref_t mapop;
465 	gnttab_unmap_grant_ref_t unmapop;
466 	caddr_t ringva;
467 	ddi_acc_hdl_t *ap;
468 	ddi_acc_impl_t *iap;
469 	xendev_ring_t *ring;
470 	int err;
471 	char errstr[] = "mapping in ring buffer";
472 
473 	ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
474 	oeid = xvdi_get_oeid(dip);
475 
476 	/* alloc va in backend dom for ring buffer */
477 	ringva = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
478 	    0, 0, 0, 0, VM_SLEEP);
479 
480 	/* map in ring page */
481 	hat_prepare_mapping(kas.a_hat, ringva);
482 	mapop.host_addr = (uint64_t)(uintptr_t)ringva;
483 	mapop.flags = GNTMAP_host_map;
484 	mapop.ref = gref;
485 	mapop.dom = oeid;
486 	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &mapop, 1);
487 	if (err) {
488 		xvdi_fatal_error(dip, err, errstr);
489 		goto errout1;
490 	}
491 
492 	if (mapop.status != 0) {
493 		xvdi_fatal_error(dip, err, errstr);
494 		goto errout2;
495 	}
496 	ring->xr_vaddr = ringva;
497 	ring->xr_grant_hdl = mapop.handle;
498 	ring->xr_gref = gref;
499 
500 	/*
501 	 * init an acc handle and associate it w/ this ring
502 	 * this is only for backend drivers. we get the memory by calling
503 	 * vmem_xalloc(), instead of calling any ddi function, so we have
504 	 * to init an acc handle by ourselves
505 	 */
506 	ring->xr_acc_hdl = impl_acc_hdl_alloc(KM_SLEEP, NULL);
507 	ap = impl_acc_hdl_get(ring->xr_acc_hdl);
508 	ap->ah_vers = VERS_ACCHDL;
509 	ap->ah_dip = dip;
510 	ap->ah_xfermodes = DDI_DMA_CONSISTENT;
511 	ap->ah_acc = xendev_dc_accattr;
512 	iap = (ddi_acc_impl_t *)ap->ah_platform_private;
513 	iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
514 	impl_acc_hdl_init(ap);
515 	ap->ah_offset = 0;
516 	ap->ah_len = (off_t)PAGESIZE;
517 	ap->ah_addr = ring->xr_vaddr;
518 
519 	/* init backend ring */
520 	xvdi_ring_init_back_ring(ring, nentry, entrysize);
521 
522 	*ringpp = ring;
523 
524 	return (DDI_SUCCESS);
525 
526 errout2:
527 	/* unmap ring page */
528 	unmapop.host_addr = (uint64_t)(uintptr_t)ringva;
529 	unmapop.handle = ring->xr_grant_hdl;
530 	unmapop.dev_bus_addr = NULL;
531 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
532 	hat_release_mapping(kas.a_hat, ringva);
533 errout1:
534 	vmem_xfree(heap_arena, ringva, PAGESIZE);
535 	kmem_free(ring, sizeof (xendev_ring_t));
536 	return (DDI_FAILURE);
537 }
538 
539 /*
540  * Unmap a ring for a virtual device.
541  * This is used by backend drivers.
542  */
543 void
544 xvdi_unmap_ring(xendev_ring_t *ring)
545 {
546 	gnttab_unmap_grant_ref_t unmapop;
547 
548 	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
549 
550 	impl_acc_hdl_free(ring->xr_acc_hdl);
551 	unmapop.host_addr = (uint64_t)(uintptr_t)ring->xr_vaddr;
552 	unmapop.handle = ring->xr_grant_hdl;
553 	unmapop.dev_bus_addr = NULL;
554 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
555 	hat_release_mapping(kas.a_hat, ring->xr_vaddr);
556 	vmem_xfree(heap_arena, ring->xr_vaddr, PAGESIZE);
557 	kmem_free(ring, sizeof (xendev_ring_t));
558 }
559 
560 /*
561  * Re-initialise an inter-domain communications ring for the backend domain.
562  * ring will be re-initialized after re-grant succeed
563  * ring will be freed if fails to re-grant access to backend domain
564  * so, don't keep useful data in the ring
565  * used only in frontend driver
566  */
567 static void
568 xvdi_reinit_ring(dev_info_t *dip, grant_ref_t *gref, xendev_ring_t *ringp)
569 {
570 	paddr_t rpaddr;
571 	maddr_t rmaddr;
572 
573 	ASSERT((ringp != NULL) && (ringp->xr_paddr != 0));
574 	rpaddr = ringp->xr_paddr;
575 
576 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? rpaddr : pa_to_ma(rpaddr);
577 	gnttab_grant_foreign_access_ref(ringp->xr_gref, xvdi_get_oeid(dip),
578 	    rmaddr >> PAGESHIFT, 0);
579 	*gref = ringp->xr_gref;
580 
581 	/* init frontend ring */
582 	xvdi_ring_init_sring(ringp);
583 	xvdi_ring_init_front_ring(ringp, ringp->xr_sring.fr.nr_ents,
584 	    ringp->xr_entry_size);
585 }
586 
587 /*
588  * allocate Xen inter-domain communications ring for Xen virtual devices
589  * used only in frontend driver
590  * if *ringpp is not NULL, we'll simply re-init it
591  */
592 int
593 xvdi_alloc_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
594     grant_ref_t *gref, xendev_ring_t **ringpp)
595 {
596 	size_t len;
597 	xendev_ring_t *ring;
598 	ddi_dma_cookie_t dma_cookie;
599 	uint_t ncookies;
600 	grant_ref_t ring_gref;
601 	domid_t oeid;
602 	maddr_t rmaddr;
603 
604 	if (*ringpp) {
605 		xvdi_reinit_ring(dip, gref, *ringpp);
606 		return (DDI_SUCCESS);
607 	}
608 
609 	*ringpp = ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
610 	oeid = xvdi_get_oeid(dip);
611 
612 	/*
613 	 * Allocate page for this ring buffer
614 	 */
615 	if (ddi_dma_alloc_handle(dip, &xendev_dc_dmaattr, DDI_DMA_SLEEP,
616 	    0, &ring->xr_dma_hdl) != DDI_SUCCESS)
617 		goto err;
618 
619 	if (ddi_dma_mem_alloc(ring->xr_dma_hdl, PAGESIZE,
620 	    &xendev_dc_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
621 	    &ring->xr_vaddr, &len, &ring->xr_acc_hdl) != DDI_SUCCESS) {
622 		ddi_dma_free_handle(&ring->xr_dma_hdl);
623 		goto err;
624 	}
625 
626 	if (ddi_dma_addr_bind_handle(ring->xr_dma_hdl, NULL,
627 	    ring->xr_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
628 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED) {
629 		ddi_dma_mem_free(&ring->xr_acc_hdl);
630 		ring->xr_vaddr = NULL;
631 		ddi_dma_free_handle(&ring->xr_dma_hdl);
632 		goto err;
633 	}
634 	ASSERT(ncookies == 1);
635 	ring->xr_paddr = dma_cookie.dmac_laddress;
636 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? ring->xr_paddr :
637 	    pa_to_ma(ring->xr_paddr);
638 
639 	if ((ring_gref = gnttab_grant_foreign_access(oeid,
640 	    rmaddr >> PAGESHIFT, 0)) == (grant_ref_t)-1) {
641 		(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
642 		ddi_dma_mem_free(&ring->xr_acc_hdl);
643 		ring->xr_vaddr = NULL;
644 		ddi_dma_free_handle(&ring->xr_dma_hdl);
645 		goto err;
646 	}
647 	*gref = ring->xr_gref = ring_gref;
648 
649 	/* init frontend ring */
650 	xvdi_ring_init_sring(ring);
651 	xvdi_ring_init_front_ring(ring, nentry, entrysize);
652 
653 	return (DDI_SUCCESS);
654 
655 err:
656 	kmem_free(ring, sizeof (xendev_ring_t));
657 	return (DDI_FAILURE);
658 }
659 
660 /*
661  * Release ring buffers allocated for Xen devices
662  * used for frontend driver
663  */
664 void
665 xvdi_free_ring(xendev_ring_t *ring)
666 {
667 	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
668 
669 	(void) gnttab_end_foreign_access_ref(ring->xr_gref, 0);
670 	(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
671 	ddi_dma_mem_free(&ring->xr_acc_hdl);
672 	ddi_dma_free_handle(&ring->xr_dma_hdl);
673 	kmem_free(ring, sizeof (xendev_ring_t));
674 }
675 
/*
 * Create (and bind or online) a devinfo node for a virtual device of
 * the given class, domain and virtual device number under 'parent'.
 * Returns the new dip, or NULL if there is no suitable driver or the
 * device is already closing/closed in the store.  The caller must
 * hold the parent busy (DEVI_BUSY_OWNED).
 */
dev_info_t *
xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;
	boolean_t backend;
	i_xd_cfg_t *xdcp;
	char xsnamebuf[TYPICALMAXPATHLEN];
	char *type, *node = NULL, *xsname = NULL;
	unsigned int tlen;

	ASSERT(DEVI_BUSY_OWNED(parent));

	backend = (dom != DOMID_SELF);
	xdcp = i_xvdi_devclass2cfg(devclass);
	ASSERT(xdcp != NULL);

	/* vdev == -1 means a soft device with no xenstore presence */
	if (vdev != -1) {
		if (!backend) {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d", xdcp->xs_path_fe, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_fe;
		} else {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_be;
		}
	} else {
		node = xdcp->node_fe;
	}

	/* Must have a driver to use. */
	if (node == NULL)
		return (NULL);

	/*
	 * We need to check the state of this device before we go
	 * further, otherwise we'll end up with a dead loop if
	 * anything goes wrong.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read_driver_state(xsname) >= XenbusStateClosing))
		return (NULL);

	ndi_devi_alloc_sleep(parent, node, DEVI_SID_NODEID, &dip);

	/*
	 * Driver binding uses the compatible property _before_ the
	 * node name, so we set the node name to the 'model' of the
	 * device (i.e. 'xnb' or 'xdb') and, if 'type' is present,
	 * encode both the model and the type in a compatible property
	 * (i.e. 'xnb,netfront' or 'xnb,SUNW_mac').  This allows a
	 * driver binding based on the <model,type> pair _before_ a
	 * binding based on the node name.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read(XBT_NULL, xsname, "type", (void *)&type, &tlen)
	    == 0)) {
		size_t clen;
		char *c[1];

		/* "<node>,<type>" plus comma and NUL */
		clen = strlen(node) + strlen(type) + 2;
		c[0] = kmem_alloc(clen, KM_SLEEP);
		(void) snprintf(c[0], clen, "%s,%s", node, type);

		(void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    dip, "compatible", (char **)c, 1);

		kmem_free(c[0], clen);
		kmem_free(type, tlen);
	}

	/* properties consumed later by xvdi_init_dev() */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "devclass", devclass);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "domain", dom);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);

	if (i_ddi_devi_attached(parent))
		/*
		 * Cleanup happens in xendev_removechild when the
		 * other end closes or a driver fails to attach.
		 */
		(void) ndi_devi_online(dip, 0);
	else
		(void) ndi_devi_bind_driver(dip, 0);

	return (dip);
}
765 
766 /*
767  * xendev_enum_class()
768  */
769 void
770 xendev_enum_class(dev_info_t *parent, xendev_devclass_t devclass)
771 {
772 	i_xd_cfg_t *xdcp;
773 
774 	xdcp = i_xvdi_devclass2cfg(devclass);
775 	ASSERT(xdcp != NULL);
776 
777 	if (xdcp->xsdev == NULL) {
778 		int circ;
779 
780 		/*
781 		 * Don't need to probe this kind of device from the
782 		 * store, just create one if it doesn't exist.
783 		 */
784 
785 		ndi_devi_enter(parent, &circ);
786 		if (xvdi_find_dev(parent, devclass, DOMID_SELF, -1)
787 		    == NULL)
788 			(void) xvdi_create_dev(parent, devclass,
789 			    DOMID_SELF, -1);
790 		ndi_devi_exit(parent, circ);
791 	} else {
792 		/*
793 		 * Probe this kind of device from the store, both
794 		 * frontend and backend.
795 		 */
796 
797 		i_xvdi_enum_fe(parent, xdcp);
798 		i_xvdi_enum_be(parent, xdcp);
799 	}
800 }
801 
802 /*
803  * xendev_enum_all()
804  */
805 void
806 xendev_enum_all(dev_info_t *parent, boolean_t store_unavailable)
807 {
808 	int i;
809 	i_xd_cfg_t *xdcp;
810 	boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
811 	boolean_t domU = !dom0;
812 
813 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
814 
815 		if (dom0 && !(xdcp->flags & XD_DOM_ZERO))
816 			continue;
817 
818 		if (domU && !(xdcp->flags & XD_DOM_GUEST))
819 			continue;
820 
821 		/*
822 		 * Dom0 relies on watchpoints to create non-soft
823 		 * devices - don't attempt to iterate over the store.
824 		 */
825 		if (dom0 && (xdcp->xsdev != NULL))
826 			continue;
827 
828 		/*
829 		 * If the store is not yet available, don't attempt to
830 		 * iterate.
831 		 */
832 		if (store_unavailable && (xdcp->xsdev != NULL))
833 			continue;
834 
835 		xendev_enum_class(parent, xdcp->devclass);
836 	}
837 }
838 
839 xendev_devclass_t
840 xendev_nodename_to_devclass(char *nodename)
841 {
842 	int i;
843 	i_xd_cfg_t *xdcp;
844 
845 	/*
846 	 * This relies on the convention that variants of a base
847 	 * driver share the same prefix and that there are no drivers
848 	 * which share a common prefix with the name of any other base
849 	 * drivers.
850 	 *
851 	 * So for a base driver 'xnb' (which is the name listed in
852 	 * xdci) the variants all begin with the string 'xnb' (in fact
853 	 * they are 'xnbe', 'xnbo' and 'xnbu') and there are no other
854 	 * base drivers which have the prefix 'xnb'.
855 	 */
856 	ASSERT(nodename != NULL);
857 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
858 		if (((xdcp->node_fe != NULL) &&
859 		    (strncmp(nodename, xdcp->node_fe,
860 		    strlen(xdcp->node_fe)) == 0)) ||
861 		    ((xdcp->node_be != NULL) &&
862 		    (strncmp(nodename, xdcp->node_be,
863 		    strlen(xdcp->node_be)) == 0)))
864 
865 			return (xdcp->devclass);
866 	}
867 	return (XEN_INVAL);
868 }
869 
870 int
871 xendev_devclass_ipl(xendev_devclass_t devclass)
872 {
873 	i_xd_cfg_t *xdcp;
874 
875 	xdcp = i_xvdi_devclass2cfg(devclass);
876 	ASSERT(xdcp != NULL);
877 
878 	return (xdcp->xd_ipl);
879 }
880 
881 /*
882  * Determine if a devinfo instance exists of a particular device
883  * class, domain and xenstore virtual device number.
884  */
885 dev_info_t *
886 xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass,
887     domid_t dom, int vdev)
888 {
889 	dev_info_t *dip;
890 
891 	ASSERT(DEVI_BUSY_OWNED(parent));
892 
893 	switch (devclass) {
894 	case XEN_CONSOLE:
895 	case XEN_XENBUS:
896 	case XEN_DOMCAPS:
897 	case XEN_BALLOON:
898 	case XEN_EVTCHN:
899 	case XEN_PRIVCMD:
900 		/* Console and soft devices have no vdev. */
901 		vdev = -1;
902 		break;
903 	default:
904 		break;
905 	}
906 
907 	for (dip = ddi_get_child(parent); dip != NULL;
908 	    dip = ddi_get_next_sibling(dip)) {
909 		int *vdevnump, *domidp, *devclsp, vdevnum;
910 		uint_t ndomid, nvdevnum, ndevcls;
911 		xendev_devclass_t devcls;
912 		domid_t domid;
913 		struct xendev_ppd *pdp = ddi_get_parent_data(dip);
914 
915 		if (pdp == NULL) {
916 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
917 			    DDI_PROP_DONTPASS, "domain", &domidp, &ndomid) !=
918 			    DDI_PROP_SUCCESS)
919 				continue;
920 			ASSERT(ndomid == 1);
921 			domid = (domid_t)*domidp;
922 			ddi_prop_free(domidp);
923 
924 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
925 			    DDI_PROP_DONTPASS, "vdev", &vdevnump, &nvdevnum) !=
926 			    DDI_PROP_SUCCESS)
927 				continue;
928 			ASSERT(nvdevnum == 1);
929 			vdevnum = *vdevnump;
930 			ddi_prop_free(vdevnump);
931 
932 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
933 			    DDI_PROP_DONTPASS, "devclass", &devclsp,
934 			    &ndevcls) != DDI_PROP_SUCCESS)
935 				continue;
936 			ASSERT(ndevcls == 1);
937 			devcls = (xendev_devclass_t)*devclsp;
938 			ddi_prop_free(devclsp);
939 		} else {
940 			domid = pdp->xd_domain;
941 			vdevnum = pdp->xd_vdevnum;
942 			devcls = pdp->xd_devclass;
943 		}
944 
945 		if ((domid == dom) && (vdevnum == vdev) && (devcls == devclass))
946 			return (dip);
947 	}
948 	return (NULL);
949 }
950 
951 int
952 xvdi_get_evtchn(dev_info_t *xdip)
953 {
954 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
955 
956 	ASSERT(pdp != NULL);
957 	return (pdp->xd_evtchn);
958 }
959 
960 int
961 xvdi_get_vdevnum(dev_info_t *xdip)
962 {
963 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
964 
965 	ASSERT(pdp != NULL);
966 	return (pdp->xd_vdevnum);
967 }
968 
969 char *
970 xvdi_get_xsname(dev_info_t *xdip)
971 {
972 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
973 
974 	ASSERT(pdp != NULL);
975 	return ((char *)(pdp->xd_xsdev.nodename));
976 }
977 
978 char *
979 xvdi_get_oename(dev_info_t *xdip)
980 {
981 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
982 
983 	ASSERT(pdp != NULL);
984 	if (pdp->xd_devclass == XEN_CONSOLE)
985 		return (NULL);
986 	return ((char *)(pdp->xd_xsdev.otherend));
987 }
988 
989 struct xenbus_device *
990 xvdi_get_xsd(dev_info_t *xdip)
991 {
992 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
993 
994 	ASSERT(pdp != NULL);
995 	return (&pdp->xd_xsdev);
996 }
997 
998 domid_t
999 xvdi_get_oeid(dev_info_t *xdip)
1000 {
1001 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1002 
1003 	ASSERT(pdp != NULL);
1004 	if (pdp->xd_devclass == XEN_CONSOLE)
1005 		return ((domid_t)-1);
1006 	return ((domid_t)(pdp->xd_xsdev.otherend_id));
1007 }
1008 
1009 void
1010 xvdi_dev_error(dev_info_t *dip, int errno, char *errstr)
1011 {
1012 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1013 
1014 	ASSERT(pdp != NULL);
1015 	xenbus_dev_error(&pdp->xd_xsdev, errno, errstr);
1016 }
1017 
1018 void
1019 xvdi_fatal_error(dev_info_t *dip, int errno, char *errstr)
1020 {
1021 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1022 
1023 	ASSERT(pdp != NULL);
1024 	xenbus_dev_fatal(&pdp->xd_xsdev, errno, errstr);
1025 }
1026 
/*
 * Taskq handler run when the other end's state changes.  Delivers
 * the new state to the driver via the XS_OE_STATE event if a handler
 * is registered; otherwise takes the default close/remove actions.
 */
static void
i_xvdi_oestate_handler(void *arg)
{
	dev_info_t *dip = arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	XenbusState oestate = pdp->xd_xsdev.otherend_state;
	ddi_eventcookie_t evc;

	mutex_enter(&pdp->xd_lk);

	if (pdp->xd_oe_ehid != NULL) {
		/* send notification to driver */
		if (ddi_get_eventcookie(dip, XS_OE_STATE,
		    &evc) == DDI_SUCCESS) {
			/* drop the lock across the upcall into the driver */
			mutex_exit(&pdp->xd_lk);
			(void) ndi_post_event(dip, dip, evc, &oestate);
			mutex_enter(&pdp->xd_lk);
		}
	} else {
		/*
		 * take default action, if driver hasn't registered its
		 * event handler yet
		 */
		if (oestate == XenbusStateClosing) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
		} else if (oestate == XenbusStateClosed) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
			(void) xvdi_post_event(dip, XEN_HP_REMOVE);
		}
	}

	mutex_exit(&pdp->xd_lk);

	/*
	 * We'll try to remove the devinfo node of this device if the
	 * other end has closed.
	 */
	if (oestate == XenbusStateClosed)
		(void) ddi_taskq_dispatch(DEVI(ddi_get_parent(dip))->devi_taskq,
		    xendev_offline_device, dip, DDI_SLEEP);
}
1070 
/*
 * Taskq handler run when the hotplug status node changes.  Reads the
 * status from the store, translates it to a xendev_hotplug_state_t
 * and delivers it to the driver via the XS_HP_STATE event.
 */
static void
i_xvdi_hpstate_handler(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_eventcookie_t evc;
	char *hp_status;
	unsigned int hpl;

	mutex_enter(&pdp->xd_lk);
	if ((ddi_get_eventcookie(dip, XS_HP_STATE, &evc) == DDI_SUCCESS) &&
	    (xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl) == 0)) {

		/* only "connected" is recognized here */
		xendev_hotplug_state_t new_state = Unrecognized;

		if (strcmp(hp_status, "connected") == 0)
			new_state = Connected;

		/* drop the lock across the upcall into the driver */
		mutex_exit(&pdp->xd_lk);

		(void) ndi_post_event(dip, dip, evc, &new_state);
		kmem_free(hp_status, hpl);
		return;
	}
	mutex_exit(&pdp->xd_lk);
}
1098 
1099 void
1100 xvdi_notify_oe(dev_info_t *dip)
1101 {
1102 	struct xendev_ppd *pdp;
1103 
1104 	pdp = ddi_get_parent_data(dip);
1105 	ASSERT(pdp->xd_evtchn != INVALID_EVTCHN);
1106 	ec_notify_via_evtchn(pdp->xd_evtchn);
1107 }
1108 
1109 static void
1110 i_xvdi_bepath_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1111 {
1112 	dev_info_t *dip = (dev_info_t *)w->dev;
1113 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1114 	char *be = NULL;
1115 	unsigned int bel;
1116 
1117 	ASSERT(len > XS_WATCH_PATH);
1118 	ASSERT(vec[XS_WATCH_PATH] != NULL);
1119 
1120 	/*
1121 	 * If the backend is not the same as that we already stored,
1122 	 * re-set our watch for its' state.
1123 	 */
1124 	if ((xenbus_read(XBT_NULL, "", vec[XS_WATCH_PATH], (void *)be, &bel)
1125 	    == 0) && (strcmp(be, pdp->xd_xsdev.otherend) != 0))
1126 		(void) i_xvdi_add_watch_oestate(dip);
1127 
1128 	if (be != NULL) {
1129 		ASSERT(bel > 0);
1130 		kmem_free(be, bel);
1131 	}
1132 }
1133 
/*
 * Arrange to watch the other end's XenbusState, delivering changes
 * through a single-threaded per-device taskq.  Caller must hold
 * pdp->xd_lk.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.nodename != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering other end state change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * xenstore change under the path that it is watching. If two
	 * changes happen consecutively in a very short amount of
	 * time, it is likely that the driver will see only the last
	 * one.
	 */
	if (pdp->xd_oe_taskq == NULL)
		if ((pdp->xd_oe_taskq = ddi_taskq_create(dip,
		    "xendev_oe_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	/*
	 * Watch for changes to the XenbusState of otherend.
	 */
	pdp->xd_xsdev.otherend_state = XenbusStateUnknown;
	pdp->xd_xsdev.otherend_changed = i_xvdi_oestate_cb;

	/*
	 * talk_to_otherend() is expected to register the state watch;
	 * undo the setup above if it fails.
	 */
	if (talk_to_otherend(&pdp->xd_xsdev) != 0) {
		i_xvdi_rem_watch_oestate(dip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1174 
/*
 * Tear down the other-end state watch and its delivery taskq, and
 * reset the cached otherend information.  Caller must hold
 * pdp->xd_lk; the lock is dropped and re-taken around operations that
 * may block waiting for the watch/taskq threads.
 */
static void
i_xvdi_rem_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	struct xenbus_device *dev;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	dev = &pdp->xd_xsdev;

	/* Unwatch for changes to XenbusState of otherend */
	if (dev->otherend_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&dev->otherend_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* make sure no event handler is running */
	if (pdp->xd_oe_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_oe_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_oe_taskq = NULL;
	}

	/* clean up: forget the otherend and free its cached strings */
	dev->otherend_state = XenbusStateUnknown;
	dev->otherend_id = (domid_t)-1;
	if (dev->otherend_watch.node != NULL)
		kmem_free((void *)dev->otherend_watch.node,
		    strlen(dev->otherend_watch.node) + 1);
	dev->otherend_watch.node = NULL;
	if (dev->otherend != NULL)
		kmem_free((void *)dev->otherend, strlen(dev->otherend) + 1);
	dev->otherend = NULL;
}
1213 
/*
 * Watch the backend's "<nodename>/hotplug-status" xenstore node,
 * delivering changes through a single-threaded per-device taskq.
 * Backend devices only.  Caller must hold pdp->xd_lk.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering hotplug status change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * hotplug status change under the path that it is
	 * watching. If two changes happen consecutively in a very
	 * short amount of time, it is likely that the driver only
	 * sees the last one.
	 */
	if (pdp->xd_hp_taskq == NULL)
		if ((pdp->xd_hp_taskq = ddi_taskq_create(dip,
		    "xendev_hp_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	if (pdp->xd_hp_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* watch node is "<nodename>/hotplug-status" */
		len = strlen(pdp->xd_xsdev.nodename) +
		    strlen("/hotplug-status") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/hotplug-status",
		    pdp->xd_xsdev.nodename);

		pdp->xd_hp_watch.node = path;
		pdp->xd_hp_watch.callback = i_xvdi_hpstate_cb;
		pdp->xd_hp_watch.dev = (struct xenbus_device *)dip; /* yuck! */
		if (register_xenbus_watch(&pdp->xd_hp_watch) != 0) {
			/* undoes both the watch setup and the taskq */
			i_xvdi_rem_watch_hpstate(dip);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1264 
/*
 * Undo i_xvdi_add_watch_hpstate(): unregister the hotplug-status
 * watch, drain and destroy its taskq, and free the watch path.
 * Backend devices only.  Caller must hold pdp->xd_lk; it is dropped
 * and re-taken around blocking operations.
 */
static void
i_xvdi_rem_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/* Unwatch for changes to "hotplug-status" node for backend device. */
	if (pdp->xd_hp_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_hp_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* Make sure no event handler is running. */
	if (pdp->xd_hp_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_hp_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_hp_taskq = NULL;
	}

	/* Clean up. */
	if (pdp->xd_hp_watch.node != NULL) {
		kmem_free((void *)pdp->xd_hp_watch.node,
		    strlen(pdp->xd_hp_watch.node) + 1);
		pdp->xd_hp_watch.node = NULL;
	}
}
1297 
1298 static int
1299 i_xvdi_add_watches(dev_info_t *dip)
1300 {
1301 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1302 
1303 	ASSERT(pdp != NULL);
1304 
1305 	mutex_enter(&pdp->xd_lk);
1306 
1307 	if (i_xvdi_add_watch_oestate(dip) != DDI_SUCCESS) {
1308 		mutex_exit(&pdp->xd_lk);
1309 		return (DDI_FAILURE);
1310 	}
1311 
1312 	if (pdp->xd_xsdev.frontend == 1) {
1313 		/*
1314 		 * Frontend devices must watch for the backend path
1315 		 * changing.
1316 		 */
1317 		if (i_xvdi_add_watch_bepath(dip) != DDI_SUCCESS)
1318 			goto unwatch_and_fail;
1319 	} else {
1320 		/*
1321 		 * Backend devices must watch for hotplug events.
1322 		 */
1323 		if (i_xvdi_add_watch_hpstate(dip) != DDI_SUCCESS)
1324 			goto unwatch_and_fail;
1325 	}
1326 
1327 	mutex_exit(&pdp->xd_lk);
1328 
1329 	return (DDI_SUCCESS);
1330 
1331 unwatch_and_fail:
1332 	i_xvdi_rem_watch_oestate(dip);
1333 	mutex_exit(&pdp->xd_lk);
1334 
1335 	return (DDI_FAILURE);
1336 }
1337 
1338 static void
1339 i_xvdi_rem_watches(dev_info_t *dip)
1340 {
1341 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1342 
1343 	ASSERT(pdp != NULL);
1344 
1345 	mutex_enter(&pdp->xd_lk);
1346 
1347 	i_xvdi_rem_watch_oestate(dip);
1348 
1349 	if (pdp->xd_xsdev.frontend == 1)
1350 		i_xvdi_rem_watch_bepath(dip);
1351 	else
1352 		i_xvdi_rem_watch_hpstate(dip);
1353 
1354 	mutex_exit(&pdp->xd_lk);
1355 }
1356 
/*
 * Watch the frontend's "<nodename>/backend" xenstore node so we
 * notice when the backend path changes.  Frontend devices only.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * NOTE(review): unlike the oestate/hpstate helpers this one carries
 * no mutex_owned(&pdp->xd_lk) ASSERT, although i_xvdi_add_watches()
 * calls it with the lock held -- confirm the intended locking.
 */
static int
i_xvdi_add_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);

	/*
	 * Frontend devices need to watch for the backend path changing.
	 */
	if (pdp->xd_bepath_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* watch node is "<nodename>/backend" */
		len = strlen(pdp->xd_xsdev.nodename) + strlen("/backend") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/backend",
		    pdp->xd_xsdev.nodename);

		pdp->xd_bepath_watch.node = path;
		pdp->xd_bepath_watch.callback = i_xvdi_bepath_cb;
		pdp->xd_bepath_watch.dev = (struct xenbus_device *)dip;
		if (register_xenbus_watch(&pdp->xd_bepath_watch) != 0) {
			/* registration failed; release the path we built */
			kmem_free(path, len);
			pdp->xd_bepath_watch.node = NULL;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1391 
/*
 * Undo i_xvdi_add_watch_bepath(): unregister the backend-path watch
 * and free its node string.  Frontend devices only.  Caller must
 * hold pdp->xd_lk; it is dropped around the (blocking) unregister.
 */
static void
i_xvdi_rem_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);
	ASSERT(mutex_owned(&pdp->xd_lk));

	if (pdp->xd_bepath_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_bepath_watch);
		mutex_enter(&pdp->xd_lk);

		kmem_free((void *)(pdp->xd_bepath_watch.node),
		    strlen(pdp->xd_bepath_watch.node) + 1);
		pdp->xd_bepath_watch.node = NULL;
	}
}
1411 
1412 int
1413 xvdi_switch_state(dev_info_t *dip, xenbus_transaction_t xbt,
1414     XenbusState newState)
1415 {
1416 	int rv;
1417 	struct xendev_ppd *pdp;
1418 
1419 	pdp = ddi_get_parent_data(dip);
1420 	ASSERT(pdp != NULL);
1421 
1422 	XVDI_DPRINTF(XVDI_DBG_STATE,
1423 	    "xvdi_switch_state: dip 0x%p moves to %d",
1424 	    (void *)dip, newState);
1425 
1426 	rv = xenbus_switch_state(&pdp->xd_xsdev, xbt, newState);
1427 	if (rv > 0)
1428 		cmn_err(CE_WARN, "xvdi_switch_state: change state failed");
1429 
1430 	return (rv);
1431 }
1432 
1433 /*
1434  * Notify hotplug script running in userland
1435  */
1436 int
1437 xvdi_post_event(dev_info_t *dip, xendev_hotplug_cmd_t hpc)
1438 {
1439 	struct xendev_ppd *pdp;
1440 	nvlist_t *attr_list = NULL;
1441 	i_xd_cfg_t *xdcp;
1442 	sysevent_id_t eid;
1443 	int err;
1444 	char devname[256]; /* XXPV dme: ? */
1445 
1446 	pdp = ddi_get_parent_data(dip);
1447 	ASSERT(pdp != NULL);
1448 
1449 	xdcp = i_xvdi_devclass2cfg(pdp->xd_devclass);
1450 	ASSERT(xdcp != NULL);
1451 
1452 	(void) snprintf(devname, sizeof (devname) - 1, "%s%d",
1453 	    ddi_driver_name(dip),  ddi_get_instance(dip));
1454 
1455 	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME, KM_NOSLEEP);
1456 	if (err != DDI_SUCCESS)
1457 		goto failure;
1458 
1459 	err = nvlist_add_int32(attr_list, "domain", pdp->xd_domain);
1460 	if (err != DDI_SUCCESS)
1461 		goto failure;
1462 	err = nvlist_add_int32(attr_list, "vdev", pdp->xd_vdevnum);
1463 	if (err != DDI_SUCCESS)
1464 		goto failure;
1465 	err = nvlist_add_string(attr_list, "devclass", xdcp->xsdev);
1466 	if (err != DDI_SUCCESS)
1467 		goto failure;
1468 	err = nvlist_add_string(attr_list, "device", devname);
1469 	if (err != DDI_SUCCESS)
1470 		goto failure;
1471 	err = nvlist_add_string(attr_list, "fob",
1472 	    ((pdp->xd_xsdev.frontend == 1) ? "frontend" : "backend"));
1473 	if (err != DDI_SUCCESS)
1474 		goto failure;
1475 
1476 	switch (hpc) {
1477 	case XEN_HP_ADD:
1478 		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1479 		    "add", attr_list, &eid, DDI_NOSLEEP);
1480 		break;
1481 	case XEN_HP_REMOVE:
1482 		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1483 		    "remove", attr_list, &eid, DDI_NOSLEEP);
1484 		break;
1485 	default:
1486 		err = DDI_FAILURE;
1487 		goto failure;
1488 	}
1489 
1490 failure:
1491 	if (attr_list != NULL)
1492 		nvlist_free(attr_list);
1493 
1494 	return (err);
1495 }
1496 
/*
 * Watch callback fired when anything changes below one of the
 * per-class frontend/backend xenstore paths.  Duplicates the changed
 * path and dispatches i_xvdi_probe_path_handler() on the xpvd nexus
 * taskq; the handler frees the duplicated path.
 */
/* ARGSUSED */
static void
i_xvdi_probe_path_cb(struct xenbus_watch *w, const char **vec,
    unsigned int len)
{
	char *path;

	/* lazily resolve the xpvd nexus devinfo on first use */
	if (xendev_dip == NULL)
		xendev_dip = ddi_find_devinfo("xpvd", -1, 0);

	path = i_ddi_strdup((char *)vec[XS_WATCH_PATH], KM_SLEEP);

	(void) ddi_taskq_dispatch(DEVI(xendev_dip)->devi_taskq,
	    i_xvdi_probe_path_handler, (void *)path, DDI_SLEEP);
}
1512 
1513 static void
1514 i_xvdi_watch_device(char *path)
1515 {
1516 	struct xenbus_watch *w;
1517 
1518 	ASSERT(path != NULL);
1519 
1520 	w = kmem_zalloc(sizeof (*w), KM_SLEEP);
1521 	w->node = path;
1522 	w->callback = &i_xvdi_probe_path_cb;
1523 	w->dev = NULL;
1524 
1525 	if (register_xenbus_watch(w) != 0) {
1526 		cmn_err(CE_WARN, "i_xvdi_watch_device: "
1527 		    "cannot set watch on %s", path);
1528 		kmem_free(w, sizeof (*w));
1529 		return;
1530 	}
1531 }
1532 
1533 void
1534 xvdi_watch_devices(int newstate)
1535 {
1536 	int devclass;
1537 
1538 	/*
1539 	 * Watch for devices being created in the store.
1540 	 */
1541 	if (newstate == XENSTORE_DOWN)
1542 		return;
1543 	for (devclass = 0; devclass < NXDC; devclass++) {
1544 		if (xdci[devclass].xs_path_fe != NULL)
1545 			i_xvdi_watch_device(xdci[devclass].xs_path_fe);
1546 		if (xdci[devclass].xs_path_be != NULL)
1547 			i_xvdi_watch_device(xdci[devclass].xs_path_be);
1548 	}
1549 }
1550 
1551 /*
1552  * Iterate over the store looking for backend devices to create.
1553  */
1554 static void
1555 i_xvdi_enum_be(dev_info_t *parent, i_xd_cfg_t *xdcp)
1556 {
1557 	char **domains;
1558 	unsigned int ndomains;
1559 	int ldomains, i;
1560 
1561 	if ((domains = xenbus_directory(XBT_NULL, xdcp->xs_path_be, "",
1562 	    &ndomains)) == NULL)
1563 		return;
1564 
1565 	for (i = 0, ldomains = 0; i < ndomains; i++) {
1566 		ldomains += strlen(domains[i]) + 1 + sizeof (char *);
1567 
1568 		i_xvdi_enum_worker(parent, xdcp, domains[i]);
1569 	}
1570 	kmem_free(domains, ldomains);
1571 }
1572 
1573 /*
1574  * Iterate over the store looking for frontend devices to create.
1575  */
1576 static void
1577 i_xvdi_enum_fe(dev_info_t *parent, i_xd_cfg_t *xdcp)
1578 {
1579 	i_xvdi_enum_worker(parent, xdcp, NULL);
1580 }
1581 
1582 static void
1583 i_xvdi_enum_worker(dev_info_t *parent, i_xd_cfg_t *xdcp,
1584     char *domain)
1585 {
1586 	char *path, *domain_path, *ep;
1587 	char **devices;
1588 	unsigned int ndevices;
1589 	int ldevices, j, circ;
1590 	domid_t dom;
1591 
1592 	if (domain == NULL) {
1593 		dom = DOMID_SELF;
1594 		path = xdcp->xs_path_fe;
1595 		domain_path = "";
1596 	} else {
1597 		(void) ddi_strtol(domain, &ep, 0, (long *)&dom);
1598 		path = xdcp->xs_path_be;
1599 		domain_path = domain;
1600 	}
1601 
1602 	if ((devices = xenbus_directory(XBT_NULL, path, domain_path,
1603 	    &ndevices)) == NULL)
1604 		return;
1605 
1606 	for (j = 0, ldevices = 0; j < ndevices; j++) {
1607 		int vdev;
1608 
1609 		ldevices += strlen(devices[j]) + 1 + sizeof (char *);
1610 		(void) ddi_strtol(devices[j], &ep, 0, (long *)&vdev);
1611 
1612 		ndi_devi_enter(parent, &circ);
1613 
1614 		if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev)
1615 		    == NULL)
1616 			(void) xvdi_create_dev(parent, xdcp->devclass,
1617 			    dom, vdev);
1618 
1619 		ndi_devi_exit(parent, circ);
1620 	}
1621 	kmem_free(devices, ldevices);
1622 }
1623 
/*
 * Leaf drivers should call this in their detach() routine during suspend.
 * Removes all xenstore watches so no callbacks fire while suspended.
 */
void
xvdi_suspend(dev_info_t *dip)
{
	i_xvdi_rem_watches(dip);
}
1632 
/*
 * Leaf drivers should call this in their attach() routine during resume.
 * Re-establishes the watches removed by xvdi_suspend(); returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
int
xvdi_resume(dev_info_t *dip)
{
	return (i_xvdi_add_watches(dip));
}
1641 
/*
 * Add event handler for the leaf driver
 * to handle event triggered by the change in xenstore.
 *
 * 'name' selects the event: XS_OE_STATE (otherend state changes) or
 * XS_HP_STATE (hotplug status, backend devices only).  A NULL
 * evthandler keeps the built-in default handling.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
int
xvdi_add_event_handler(dev_info_t *dip, char *name,
    void (*evthandler)(dev_info_t *, ddi_eventcookie_t, void *, void *))
{
	ddi_eventcookie_t ecv;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_callback_id_t *cbid;

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_lk);

	if (strcmp(name, XS_OE_STATE) == 0) {
		ASSERT(pdp->xd_xsdev.otherend != NULL);

		cbid = &pdp->xd_oe_ehid;
	} else if (strcmp(name, XS_HP_STATE) == 0) {
		/* hotplug state only exists for backend devices */
		if (pdp->xd_xsdev.frontend == 1) {
			mutex_exit(&pdp->xd_lk);
			return (DDI_FAILURE);
		}

		ASSERT(pdp->xd_hp_watch.node != NULL);

		cbid = &pdp->xd_hp_ehid;
	} else {
		/* Unsupported watch. */
		mutex_exit(&pdp->xd_lk);
		return (DDI_FAILURE);
	}

	/*
	 * No event handler provided, take default action to handle
	 * event.
	 */
	if (evthandler == NULL) {
		mutex_exit(&pdp->xd_lk);
		return (DDI_SUCCESS);
	}

	/* a handler must not already be registered for this event */
	ASSERT(*cbid == NULL);

	if (ddi_get_eventcookie(dip, name, &ecv) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to find %s cookie for %s@%s",
		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
		mutex_exit(&pdp->xd_lk);
		return (DDI_FAILURE);
	}
	if (ddi_add_event_handler(dip, ecv, evthandler, NULL, cbid)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to add %s event handler for %s@%s",
		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
		*cbid = NULL;
		mutex_exit(&pdp->xd_lk);
		return (DDI_FAILURE);
	}

	mutex_exit(&pdp->xd_lk);

	return (DDI_SUCCESS);
}
1707 
1708 /*
1709  * Remove event handler for the leaf driver and unwatch xenstore
1710  * so, driver will not be notified when xenstore entry changed later
1711  */
1712 void
1713 xvdi_remove_event_handler(dev_info_t *dip, char *name)
1714 {
1715 	struct xendev_ppd *pdp;
1716 	boolean_t rem_oe = B_FALSE, rem_hp = B_FALSE;
1717 	ddi_callback_id_t oeid = NULL, hpid = NULL;
1718 
1719 	pdp = ddi_get_parent_data(dip);
1720 	ASSERT(pdp != NULL);
1721 
1722 	if (name == NULL) {
1723 		rem_oe = B_TRUE;
1724 		rem_hp = B_TRUE;
1725 	} else if (strcmp(name, XS_OE_STATE) == 0) {
1726 		rem_oe = B_TRUE;
1727 	} else if (strcmp(name, XS_HP_STATE) == 0) {
1728 		rem_hp = B_TRUE;
1729 	} else {
1730 		cmn_err(CE_WARN, "event %s not supported, cannot remove", name);
1731 		return;
1732 	}
1733 
1734 	mutex_enter(&pdp->xd_lk);
1735 
1736 	if (rem_oe && (pdp->xd_oe_ehid != NULL)) {
1737 		oeid = pdp->xd_oe_ehid;
1738 		pdp->xd_oe_ehid = NULL;
1739 	}
1740 
1741 	if (rem_hp && (pdp->xd_hp_ehid != NULL)) {
1742 		hpid = pdp->xd_hp_ehid;
1743 		pdp->xd_hp_ehid = NULL;
1744 	}
1745 
1746 	mutex_exit(&pdp->xd_lk);
1747 
1748 	if (oeid != NULL)
1749 		(void) ddi_remove_event_handler(oeid);
1750 	if (hpid != NULL)
1751 		(void) ddi_remove_event_handler(hpid);
1752 }
1753 
1754 
1755 /*
1756  * common ring interfaces
1757  */
1758 
/*
 * Accessors for the front/back views of a shared comms ring and for
 * indexing into its entries; all take a (xendev_ring_t *).
 */
#define	FRONT_RING(_ringp)	(&(_ringp)->xr_sring.fr)
#define	BACK_RING(_ringp)	(&(_ringp)->xr_sring.br)
/*
 * Expand the macro parameter, not a literal variable name: the old
 * definition expanded 'ringp' and only worked because every caller
 * happened to name its ring pointer 'ringp'.
 */
#define	GET_RING_SIZE(_ringp)	RING_SIZE(FRONT_RING(_ringp))
#define	GET_RING_ENTRY_FE(_ringp, _idx)		\
	(FRONT_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
#define	GET_RING_ENTRY_BE(_ringp, _idx)		\
	(BACK_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
1768 
1769 unsigned int
1770 xvdi_ring_avail_slots(xendev_ring_t *ringp)
1771 {
1772 	comif_ring_fe_t *frp;
1773 	comif_ring_be_t *brp;
1774 
1775 	if (ringp->xr_frontend) {
1776 		frp = FRONT_RING(ringp);
1777 		return (GET_RING_SIZE(ringp) -
1778 		    (frp->req_prod_pvt - frp->rsp_cons));
1779 	} else {
1780 		brp = BACK_RING(ringp);
1781 		return (GET_RING_SIZE(ringp) -
1782 		    (brp->rsp_prod_pvt - brp->req_cons));
1783 	}
1784 }
1785 
1786 int
1787 xvdi_ring_has_unconsumed_requests(xendev_ring_t *ringp)
1788 {
1789 	comif_ring_be_t *brp;
1790 
1791 	ASSERT(!ringp->xr_frontend);
1792 	brp = BACK_RING(ringp);
1793 	return ((brp->req_cons !=
1794 	    ddi_get32(ringp->xr_acc_hdl, &brp->sring->req_prod)) &&
1795 	    ((brp->req_cons - brp->rsp_prod_pvt) != RING_SIZE(brp)));
1796 }
1797 
1798 int
1799 xvdi_ring_has_incomp_request(xendev_ring_t *ringp)
1800 {
1801 	comif_ring_fe_t *frp;
1802 
1803 	ASSERT(ringp->xr_frontend);
1804 	frp = FRONT_RING(ringp);
1805 	return (frp->req_prod_pvt !=
1806 	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
1807 }
1808 
1809 int
1810 xvdi_ring_has_unconsumed_responses(xendev_ring_t *ringp)
1811 {
1812 	comif_ring_fe_t *frp;
1813 
1814 	ASSERT(ringp->xr_frontend);
1815 	frp = FRONT_RING(ringp);
1816 	return (frp->rsp_cons !=
1817 	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
1818 }
1819 
/* NOTE: req_event will be increased as needed */
/*
 * Return the next ring entry for this end to work on, or NULL when
 * none is available: the next free request slot for a frontend, the
 * next unconsumed request for a backend.  Advances the private
 * producer/consumer index on success.
 */
void *
xvdi_ring_get_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		if (!RING_FULL(frp))
			return (GET_RING_ENTRY_FE(ringp, frp->req_prod_pvt++));
		else
			return (NULL);
	} else {
		/* for backend ring */
		brp = BACK_RING(ringp);
		/* RING_FINAL_CHECK_FOR_REQUESTS() */
		if (xvdi_ring_has_unconsumed_requests(ringp))
			return (GET_RING_ENTRY_BE(ringp, brp->req_cons++));
		else {
			/*
			 * Nothing pending: raise req_event so the peer
			 * will notify us, then re-check in case a
			 * request raced in; the membar orders the
			 * event-threshold write before the re-read.
			 */
			ddi_put32(ringp->xr_acc_hdl, &brp->sring->req_event,
			    brp->req_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_requests(ringp))
				return (GET_RING_ENTRY_BE(ringp,
				    brp->req_cons++));
			else
				return (NULL);
		}
	}
}
1852 
/*
 * Publish the frontend's private request producer index to the shared
 * ring.  Returns nonzero when the backend must be notified via its
 * event channel (i.e. its event threshold fell inside (old, new]).
 */
int
xvdi_ring_push_request(xendev_ring_t *ringp)
{
	RING_IDX old, new, reqevt;
	comif_ring_fe_t *frp;

	/* only frontend should be able to push request */
	ASSERT(ringp->xr_frontend);

	/* RING_PUSH_REQUEST_AND_CHECK_NOTIFY() */
	frp = FRONT_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_prod);
	new = frp->req_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &frp->sring->req_prod, new);
	/* make the new producer index visible before reading req_event */
	membar_enter();
	reqevt = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_event);
	return ((RING_IDX)(new - reqevt) < (RING_IDX)(new - old));
}
1871 
/* NOTE: rsp_event will be increased as needed */
/*
 * Return the next ring entry for this end to work on, or NULL when
 * none is available: the next free response slot for a backend
 * (never NULL), the next unconsumed response for a frontend.
 * Advances the private producer/consumer index on success.
 */
void *
xvdi_ring_get_response(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (!ringp->xr_frontend) {
		/* for backend ring */
		brp = BACK_RING(ringp);
		return (GET_RING_ENTRY_BE(ringp, brp->rsp_prod_pvt++));
	} else {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		/* RING_FINAL_CHECK_FOR_RESPONSES() */
		if (xvdi_ring_has_unconsumed_responses(ringp))
			return (GET_RING_ENTRY_FE(ringp, frp->rsp_cons++));
		else {
			/*
			 * Nothing pending: raise rsp_event so the peer
			 * will notify us, then re-check in case a
			 * response raced in; the membar orders the
			 * event-threshold write before the re-read.
			 */
			ddi_put32(ringp->xr_acc_hdl, &frp->sring->rsp_event,
			    frp->rsp_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_responses(ringp))
				return (GET_RING_ENTRY_FE(ringp,
				    frp->rsp_cons++));
			else
				return (NULL);
		}
	}
}
1901 
/*
 * Publish the backend's private response producer index to the shared
 * ring.  Returns nonzero when the frontend must be notified via its
 * event channel (i.e. its event threshold fell inside (old, new]).
 */
int
xvdi_ring_push_response(xendev_ring_t *ringp)
{
	RING_IDX old, new, rspevt;
	comif_ring_be_t *brp;

	/* only backend should be able to push response */
	ASSERT(!ringp->xr_frontend);

	/* RING_PUSH_RESPONSE_AND_CHECK_NOTIFY() */
	brp = BACK_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_prod);
	new = brp->rsp_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &brp->sring->rsp_prod, new);
	/* make the new producer index visible before reading rsp_event */
	membar_enter();
	rspevt = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_event);
	return ((RING_IDX)(new - rspevt) < (RING_IDX)(new - old));
}
1920 
1921 static void
1922 xvdi_ring_init_sring(xendev_ring_t *ringp)
1923 {
1924 	ddi_acc_handle_t acchdl;
1925 	comif_sring_t *xsrp;
1926 	int i;
1927 
1928 	xsrp = (comif_sring_t *)ringp->xr_vaddr;
1929 	acchdl = ringp->xr_acc_hdl;
1930 
1931 	/* shared ring initialization */
1932 	ddi_put32(acchdl, &xsrp->req_prod, 0);
1933 	ddi_put32(acchdl, &xsrp->rsp_prod, 0);
1934 	ddi_put32(acchdl, &xsrp->req_event, 1);
1935 	ddi_put32(acchdl, &xsrp->rsp_event, 1);
1936 	for (i = 0; i < sizeof (xsrp->pad); i++)
1937 		ddi_put8(acchdl, xsrp->pad + i, 0);
1938 }
1939 
1940 static void
1941 xvdi_ring_init_front_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
1942 {
1943 	comif_ring_fe_t *xfrp;
1944 
1945 	xfrp = &ringp->xr_sring.fr;
1946 	xfrp->req_prod_pvt = 0;
1947 	xfrp->rsp_cons = 0;
1948 	xfrp->nr_ents = nentry;
1949 	xfrp->sring = (comif_sring_t *)ringp->xr_vaddr;
1950 
1951 	ringp->xr_frontend = 1;
1952 	ringp->xr_entry_size = entrysize;
1953 }
1954 
1955 static void
1956 xvdi_ring_init_back_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
1957 {
1958 	comif_ring_be_t *xbrp;
1959 
1960 	xbrp = &ringp->xr_sring.br;
1961 	xbrp->rsp_prod_pvt = 0;
1962 	xbrp->req_cons = 0;
1963 	xbrp->nr_ents = nentry;
1964 	xbrp->sring = (comif_sring_t *)ringp->xr_vaddr;
1965 
1966 	ringp->xr_frontend = 0;
1967 	ringp->xr_entry_size = entrysize;
1968 }
1969 
1970 static void
1971 xendev_offline_device(void *arg)
1972 {
1973 	dev_info_t *dip = (dev_info_t *)arg;
1974 	char devname[MAXNAMELEN] = {0};
1975 
1976 	/*
1977 	 * This is currently the only chance to delete a devinfo node, which
1978 	 * is _not_ always successful.
1979 	 */
1980 	(void) ddi_deviname(dip, devname);
1981 	(void) devfs_clean(ddi_get_parent(dip), devname + 1, DV_CLEAN_FORCE);
1982 	(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
1983 }
1984 
/*
 * xenbus callback invoked when the other end's XenbusState changes.
 * Records the new state and defers processing to the per-device
 * oestate taskq (see i_xvdi_oestate_handler()).
 */
static void
i_xvdi_oestate_cb(struct xenbus_device *dev, XenbusState oestate)
{
	dev_info_t *dip = (dev_info_t *)dev->data;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	/*
	 * Don't trigger two consecutive ndi_devi_offline on the same
	 * dip.
	 */
	if ((oestate == XenbusStateClosed) &&
	    (dev->otherend_state == XenbusStateClosed))
		return;

	dev->otherend_state = oestate;
	(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
	    i_xvdi_oestate_handler, (void *)dip, DDI_SLEEP);
}
2003 
2004 /*ARGSUSED*/
2005 static void
2006 i_xvdi_hpstate_cb(struct xenbus_watch *w, const char **vec,
2007     unsigned int len)
2008 {
2009 	dev_info_t *dip = (dev_info_t *)w->dev;
2010 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
2011 
2012 	(void) ddi_taskq_dispatch(pdp->xd_hp_taskq,
2013 	    i_xvdi_hpstate_handler, (void *)dip, DDI_SLEEP);
2014 }
2015 
2016 static void
2017 i_xvdi_probe_path_handler(void *arg)
2018 {
2019 	dev_info_t *parent;
2020 	char *path = arg, *p = NULL;
2021 	int i, vdev, circ;
2022 	i_xd_cfg_t *xdcp;
2023 	boolean_t frontend;
2024 	domid_t dom;
2025 
2026 	for (i = 0, xdcp = &xdci[0]; i < NXDC; i++, xdcp++) {
2027 
2028 		if ((xdcp->xs_path_fe != NULL) &&
2029 		    (strncmp(path, xdcp->xs_path_fe, strlen(xdcp->xs_path_fe))
2030 		    == 0)) {
2031 
2032 			frontend = B_TRUE;
2033 			p = path + strlen(xdcp->xs_path_fe);
2034 			break;
2035 		}
2036 
2037 		if ((xdcp->xs_path_be != NULL) &&
2038 		    (strncmp(path, xdcp->xs_path_be, strlen(xdcp->xs_path_be))
2039 		    == 0)) {
2040 
2041 			frontend = B_FALSE;
2042 			p = path + strlen(xdcp->xs_path_be);
2043 			break;
2044 		}
2045 
2046 	}
2047 
2048 	if (p == NULL) {
2049 		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
2050 		    "unexpected path prefix in %s", path);
2051 		goto done;
2052 	}
2053 
2054 	if (frontend) {
2055 		dom = DOMID_SELF;
2056 		if (sscanf(p, "/%d/", &vdev) != 1) {
2057 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2058 			    "i_xvdi_probe_path_handler: "
2059 			    "cannot parse frontend path %s",
2060 			    path);
2061 			goto done;
2062 		}
2063 	} else {
2064 		if (sscanf(p, "/%d/%d/", &dom, &vdev) != 2) {
2065 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2066 			    "i_xvdi_probe_path_handler: "
2067 			    "cannot parse backend path %s",
2068 			    path);
2069 			goto done;
2070 		}
2071 	}
2072 
2073 	parent = xendev_dip;
2074 	ASSERT(parent != NULL);
2075 
2076 	ndi_devi_enter(parent, &circ);
2077 
2078 	if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL) {
2079 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2080 		    "i_xvdi_probe_path_handler: create for %s", path);
2081 		(void) xvdi_create_dev(parent, xdcp->devclass, dom, vdev);
2082 	} else {
2083 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2084 		    "i_xvdi_probe_path_handler: %s already exists", path);
2085 	}
2086 
2087 	ndi_devi_exit(parent, circ);
2088 
2089 done:
2090 	kmem_free(path, strlen(path) + 1);
2091 }
2092