xref: /illumos-gate/usr/src/uts/sun4v/io/vnet_dds.c (revision 7bd3a2e2)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/modctl.h>
#include <sys/prom_plat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/ddi_impldefs.h>
#include <sys/ethernet.h>
#include <sys/machsystm.h>
#include <sys/hypervisor_api.h>
#include <sys/mach_descrip.h>
#include <sys/drctl.h>
#include <sys/dr_util.h>
#include <sys/mac.h>
#include <sys/vnet.h>
#include <sys/vnet_mailbox.h>
#include <sys/vnet_common.h>
#include <sys/hsvc.h>


#define	VDDS_MAX_RANGES		6	/* 6 possible VRs */
#define	VDDS_MAX_VRINTRS	8	/* limited to 8 intrs/VR */
#define	VDDS_MAX_INTR_NUM	64	/* 0-63 are valid */

#define	VDDS_INO_RANGE_START(x) (x * VDDS_MAX_VRINTRS)
#define	HVCOOKIE(c)	((c) & 0xFFFFFFFFF)
#define	NIUCFGHDL(c)	((c) >> 32)
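
/*
 * Note: as the two macros above imply, the 64-bit cookie carried in
 * the DDS share messages encodes the NIU configuration handle in its
 * upper 32 bits and the hypervisor VR cookie in the lower bits.
 */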


/* For "ranges" property */
typedef struct vdds_ranges {
	uint32_t child_hi;
	uint32_t child_lo;
	uint32_t parent_hi;
	uint32_t parent_lo;
	uint32_t size_hi;
	uint32_t size_lo;
} vdds_ranges_t;

/* For "reg" property */
typedef struct vdds_reg {
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint32_t size_hi;
	uint32_t size_lo;
} vdds_reg_t;

/* For ddi callback argument */
typedef struct vdds_cb_arg {
	dev_info_t *dip;
	uint64_t cookie;
	uint64_t macaddr;
	uint32_t max_frame_size;
} vdds_cb_arg_t;
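
/*
 * A vdds_cb_arg_t is passed to the new-node callbacks declared below
 * (vdds_new_nexus_node()/vdds_new_niu_node()) so they can attach the
 * cookie, MAC address and max-frame-size properties to the node being
 * created; the dip field presumably carries the resulting node back to
 * the caller.
 */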


/* Functions exported to other files */
void vdds_mod_init(void);
void vdds_mod_fini(void);
int vdds_init(vnet_t *vnetp);
void vdds_cleanup(vnet_t *vnetp);
void vdds_process_dds_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg);
void vdds_cleanup_hybrid_res(void *arg);
void vdds_cleanup_hio(vnet_t *vnetp);

/* Support functions to create/destroy Hybrid device */
static dev_info_t *vdds_create_niu_node(uint64_t cookie,
    uint64_t macaddr, uint32_t max_frame_size);
static int vdds_destroy_niu_node(dev_info_t *niu_dip, uint64_t cookie);
static dev_info_t *vdds_create_new_node(vdds_cb_arg_t *cba,
    dev_info_t *pdip, int (*new_node_func)(dev_info_t *dip,
    void *arg, uint_t flags));
static int vdds_new_nexus_node(dev_info_t *dip, void *arg, uint_t flags);
static int vdds_new_niu_node(dev_info_t *dip, void *arg, uint_t flags);
static dev_info_t *vdds_find_node(uint64_t cookie, dev_info_t *sdip,
	int (*match_func)(dev_info_t *dip, void *arg));
static int vdds_match_niu_nexus(dev_info_t *dip, void *arg);
static int vdds_match_niu_node(dev_info_t *dip, void *arg);
static int vdds_get_interrupts(uint64_t cookie, int ino_range,
    int *intrs, int *nintr);

/* DDS message processing related functions */
static void vdds_process_dds_msg_task(void *arg);
static int vdds_send_dds_resp_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg, int ack);
static int vdds_send_dds_rel_msg(vnet_t *vnetp);
static void vdds_release_range_prop(dev_info_t *nexus_dip, uint64_t cookie);

/* Functions imported from other files */
extern int vnet_send_dds_msg(vnet_t *vnetp, void *dmsg);
extern int vnet_hio_mac_init(vnet_t *vnetp, char *ifname);
extern void vnet_hio_mac_cleanup(vnet_t *vnetp);

/* HV functions that are used in this file */
extern uint64_t vdds_hv_niu_vr_getinfo(uint32_t hvcookie,
    uint64_t *real_start, uint64_t *size);
extern uint64_t vdds_hv_niu_vr_get_txmap(uint32_t hvcookie, uint64_t *dma_map);
extern uint64_t vdds_hv_niu_vr_get_rxmap(uint32_t hvcookie, uint64_t *dma_map);
extern uint64_t vdds_hv_niu_vrtx_set_ino(uint32_t cookie, uint64_t vch_idx,
    uint32_t ino);
extern uint64_t vdds_hv_niu_vrrx_set_ino(uint32_t cookie, uint64_t vch_idx,
    uint32_t ino);
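
/*
 * As their names and signatures suggest, the HV calls above return a
 * virtual region's (VR) base real address and size, its TX and RX DMA
 * channel maps, and bind an interrupt number (ino) to a given TX or RX
 * channel of the VR identified by the HV cookie.
 */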


#ifdef DEBUG

#define	DEBUG_PRINTF	debug_printf

extern int vnet_dbglevel;

static void
debug_printf(const char *fname, void *arg,  const char *fmt, ...)
{
	char    buf[512];
	va_list ap;
	char    *bufp = buf;
	vnet_dds_info_t *vdds = arg;

	if (vdds != NULL) {
		(void) sprintf(bufp, "vnet%d: %s: ",
		    vdds->vnetp->instance, fname);
	} else {
		(void) sprintf(bufp, "%s: ", fname);
	}
	bufp += strlen(bufp);
	va_start(ap, fmt);
	(void) vsprintf(bufp, fmt, ap);
	va_end(ap);
	cmn_err(CE_CONT, "%s\n", buf);
}
#endif
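
/*
 * The DBG1/DBG2/DWARN/DERR macros used throughout this file are defined
 * elsewhere (presumably the vnet headers); under DEBUG they are assumed
 * to expand to DEBUG_PRINTF, i.e. debug_printf() above, with
 * vnet_dbglevel controlling verbosity.
 */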

/*
 * Hypervisor N2/NIU services information:
 *
 * The list of HV versions that support NIU HybridIO. Note that the
 * list is ordered from higher to lower version, as registration is
 * attempted in that order.
 */
static hsvc_info_t niu_hsvc[] = {
	{HSVC_REV_1, NULL, HSVC_GROUP_NIU, 2, 0, "vnet_dds"},
	{HSVC_REV_1, NULL, HSVC_GROUP_NIU, 1, 1, "vnet_dds"}
};
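
/*
 * Each entry above requests the HSVC_GROUP_NIU service group at API
 * version 2.0 and 1.1 respectively, on behalf of the "vnet_dds" module
 * (the hsvc_info_t fields are rev, private, group, major, minor and
 * modname).
 */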

/*
 * Index of the HV version that was successfully registered.
 */
static int niu_hsvc_index = -1;

/*
 * Lock to serialize the NIU device node related operations.
 */
kmutex_t vdds_dev_lock;

boolean_t vdds_hv_hio_capable = B_FALSE;

/*
 * vdds_mod_init -- one-time initialization.
 */
void
vdds_mod_init(void)
{
	int i;
	int rv;
	uint64_t minor = 0;

	/*
	 * Try to register the entries in niu_hsvc one by one.
	 */
	for (i = 0; i < (sizeof (niu_hsvc) / sizeof (hsvc_info_t)); i++) {
		rv = hsvc_register(&niu_hsvc[i], &minor);
		if (rv == 0) {
			if (minor == niu_hsvc[i].hsvc_minor) {
				vdds_hv_hio_capable = B_TRUE;
				niu_hsvc_index = i;
				break;
			} else {
				(void) hsvc_unregister(&niu_hsvc[i]);
			}
		}
	}
	mutex_init(&vdds_dev_lock, NULL, MUTEX_DRIVER, NULL);
	DBG2(NULL, "HV HIO capable=%d ver(%ld.%ld)", vdds_hv_hio_capable,
	    (niu_hsvc_index == -1) ? 0 : niu_hsvc[niu_hsvc_index].hsvc_major,
	    minor);
}

/*
 * vdds_mod_fini -- one-time cleanup.
 */
void
vdds_mod_fini(void)
{
	if (niu_hsvc_index != -1) {
		(void) hsvc_unregister(&niu_hsvc[niu_hsvc_index]);
	}
	mutex_destroy(&vdds_dev_lock);
}

/*
 * vdds_init -- DDS initialization for a vnet instance.
 */
int
vdds_init(vnet_t *vnetp)
{
	vnet_dds_info_t *vdds = &vnetp->vdds_info;
	char		qname[TASKQ_NAMELEN];

	vdds->vnetp = vnetp;
	DBG1(vdds, "Initializing..");
	(void) snprintf(qname, TASKQ_NAMELEN, "vdds_taskq%d", vnetp->instance);
	if ((vdds->dds_taskqp = ddi_taskq_create(vnetp->dip, qname, 1,
	    TASKQ_DEFAULTPRI, 0)) == NULL) {
		cmn_err(CE_WARN, "!vnet%d: Unable to create DDS task queue",
		    vnetp->instance);
		return (ENOMEM);
	}
	mutex_init(&vdds->lock, NULL, MUTEX_DRIVER, NULL);
	return (0);
}

/*
 * vdds_cleanup -- DDS cleanup for a vnet instance.
 */
void
vdds_cleanup(vnet_t *vnetp)
{
	vnet_dds_info_t *vdds = &vnetp->vdds_info;

	DBG1(vdds, "Cleanup...");
	/* Cleanup/destroy any hybrid resource that exists */
	vdds_cleanup_hybrid_res(vnetp);

	/* taskq_destroy will wait for all taskqs to complete */
	ddi_taskq_destroy(vdds->dds_taskqp);
	vdds->dds_taskqp = NULL;
	mutex_destroy(&vdds->lock);
	DBG1(vdds, "Cleanup complete");
}

/*
 * vdds_cleanup_hybrid_res -- Cleanup Hybrid resource.
 */
void
vdds_cleanup_hybrid_res(void *arg)
{
	vnet_t *vnetp = arg;
	vnet_dds_info_t *vdds = &vnetp->vdds_info;

	DBG1(vdds, "Hybrid device cleanup...");
	mutex_enter(&vdds->lock);
	if (vdds->task_flags == VNET_DDS_TASK_ADD_SHARE) {
		/*
		 * Task for ADD_SHARE is pending, simply
		 * cleanup the flags, the task will quit without
		 * any changes.
		 */
		vdds->task_flags = 0;
		DBG2(vdds, "Task for ADD is pending, clean flags only");
	} else if ((vdds->hio_dip != NULL) && (vdds->task_flags == 0)) {
		/*
		 * There is no task pending and a hybrid device
		 * is present, so dispatch a task to release the share.
		 */
		vdds->task_flags = VNET_DDS_TASK_REL_SHARE;
		(void) ddi_taskq_dispatch(vdds->dds_taskqp,
		    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
		DBG2(vdds, "Dispatched a task to destroy HIO device");
	}
	/*
	 * Other possible cases include either DEL_SHARE or
	 * REL_SHARE as pending. In that case, there is nothing
	 * to do as a task is already pending to do the cleanup.
	 */
	mutex_exit(&vdds->lock);
	DBG1(vdds, "Hybrid device cleanup complete");
}

/*
 * vdds_cleanup_hio -- An interface to clean up the HIO resources before
 *	resetting the vswitch port.
 */
void
vdds_cleanup_hio(vnet_t *vnetp)
{
	vnet_dds_info_t *vdds = &vnetp->vdds_info;

	/* Wait for any pending vdds tasks to complete */
	ddi_taskq_wait(vdds->dds_taskqp);
	vdds_cleanup_hybrid_res(vnetp);
	/* Wait for the cleanup task to complete */
	ddi_taskq_wait(vdds->dds_taskqp);
}

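/*
 * DDS message handling overview (see the handlers below): the vswitch
 * offers a share of the NIU with DDS_VNET_ADD_SHARE, which is handled by
 * creating the Hybrid (niumx/network) device node and calling
 * vnet_hio_mac_init() before ACKing; DDS_VNET_DEL_SHARE undoes that work;
 * and DDS_VNET_REL_SHARE is sent from this end when the share is released
 * locally (see vdds_cleanup_hybrid_res() above).
 */
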
/*
 * vdds_process_dds_msg -- Process a DDS message.
 */
void
vdds_process_dds_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg)
{
	vnet_dds_info_t *vdds = &vnetp->vdds_info;
	int rv;

	DBG1(vdds, "DDS message received...");

	if (dmsg->dds_class != DDS_VNET_NIU) {
		DBG2(vdds, "Invalid class, send NACK");
		(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
		return;
	}
	mutex_enter(&vdds->lock);
	switch (dmsg->dds_subclass) {
	case DDS_VNET_ADD_SHARE:
		DBG2(vdds, "DDS_VNET_ADD_SHARE message...");
		if ((vdds->task_flags != 0) || (vdds->hio_dip != NULL)) {
			/*
			 * Either a task is already pending or
			 * a hybrid device already exists.
			 */
			DWARN(vdds, "NACK: Already pending DDS task");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			mutex_exit(&vdds->lock);
			return;
		}
		vdds->task_flags = VNET_DDS_TASK_ADD_SHARE;
		bcopy(dmsg, &vnetp->vdds_info.dmsg, sizeof (vio_dds_msg_t));
		DBG2(vdds, "Dispatching task for ADD_SHARE");
		rv = ddi_taskq_dispatch(vdds->dds_taskqp,
		    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
		if (rv != 0) {
			/* Send NACK */
			DBG2(vdds, "NACK: Failed to dispatch task");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			vdds->task_flags = 0;
		}
		break;

	case DDS_VNET_DEL_SHARE:
		DBG2(vdds, "DDS_VNET_DEL_SHARE message...");
		if (vdds->task_flags == VNET_DDS_TASK_ADD_SHARE) {
			/*
			 * ADD_SHARE task still pending, simply clear
			 * task flags and ACK.
			 */
			DBG2(vdds, "ACK:ADD_SHARE task still pending");
			vdds->task_flags = 0;
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_TRUE);
			mutex_exit(&vdds->lock);
			return;
		}
		if ((vdds->task_flags == 0) && (vdds->hio_dip == NULL)) {
			/* Send NACK */
			DBG2(vdds, "NACK:No HIO device exists");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			mutex_exit(&vdds->lock);
			return;
		}
		vdds->task_flags = VNET_DDS_TASK_DEL_SHARE;
		bcopy(dmsg, &vdds->dmsg, sizeof (vio_dds_msg_t));
		DBG2(vdds, "Dispatching DEL_SHARE task");
		rv = ddi_taskq_dispatch(vdds->dds_taskqp,
		    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
		if (rv != 0) {
			/* Send NACK */
			DBG2(vdds, "NACK: failed to dispatch task");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			vdds->task_flags = 0;
		}
		break;
	case DDS_VNET_REL_SHARE:
		DBG2(vdds, "Reply for REL_SHARE reply=%d",
		    dmsg->tag.vio_subtype);
		break;
	default:
		DWARN(vdds, "Discarding Unknown DDS message");
		break;
	}
	mutex_exit(&vdds->lock);
}

/*
 * vdds_process_dds_msg_task -- Called from a taskq to process the
 *	DDS message.
 */
static void
vdds_process_dds_msg_task(void *arg)
{
	vnet_t		*vnetp = arg;
	vnet_dds_info_t	*vdds = &vnetp->vdds_info;
	vio_dds_msg_t	*dmsg = &vdds->dmsg;
	dev_info_t	*dip;
	uint32_t	max_frame_size;
	uint64_t	hio_cookie;
	int		rv;

	DBG1(vdds, "DDS task started...");
	mutex_enter(&vdds->lock);
	switch (vdds->task_flags) {
	case VNET_DDS_TASK_ADD_SHARE:
		DBG2(vdds, "ADD_SHARE task...");
		hio_cookie = dmsg->msg.share_msg.cookie;
		/*
		 * The max-frame-size value needs to be set to the
		 * full Ethernet frame size. That is,
		 * header + payload + checksum.
		 */
		max_frame_size = vnetp->mtu +
		    sizeof (struct  ether_vlan_header) + ETHERFCSL;
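		/*
		 * For example, with an MTU of 1500 this works out to
		 * 1500 + 18 (VLAN-tagged Ethernet header) + 4 (FCS) =
		 * 1522 bytes.
		 */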
		dip = vdds_create_niu_node(hio_cookie,
		    dmsg->msg.share_msg.macaddr, max_frame_size);
		if (dip == NULL) {
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
			DERR(vdds, "Failed to create HIO node");
		} else {
			vdds->hio_dip = dip;
			vdds->hio_cookie = hio_cookie;
			(void) snprintf(vdds->hio_ifname,
			    sizeof (vdds->hio_ifname), "%s%d",
			    ddi_driver_name(dip), ddi_get_instance(dip));

			rv = vnet_hio_mac_init(vnetp, vdds->hio_ifname);
			if (rv != 0) {
				/* failed - cleanup, send failed DDS message */
				DERR(vdds, "HIO mac init failed, cleaning up");
				rv = vdds_destroy_niu_node(dip, hio_cookie);
				if (rv == 0) {
					/* use DERR to print by default */
					DERR(vdds, "Successfully destroyed"
					    " Hybrid node");
				} else {
					cmn_err(CE_WARN, "vnet%d:Failed to "
					    "destroy Hybrid node",
					    vnetp->instance);
				}
				vdds->hio_dip = NULL;
				vdds->hio_cookie = 0;
				(void) vdds_send_dds_resp_msg(vnetp,
				    dmsg, B_FALSE);
			} else {
				(void) vdds_send_dds_resp_msg(vnetp,
				    dmsg, B_TRUE);
			}
			/* use DERR to print by default */
			DERR(vdds, "Successfully created HIO node");
		}
		break;

	case VNET_DDS_TASK_DEL_SHARE:
		DBG2(vdds, "DEL_SHARE task...");
		if (vnetp->vdds_info.hio_dip == NULL) {
			DBG2(vdds, "NACK: No HIO device to destroy");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
		} else {
			vnet_hio_mac_cleanup(vnetp);
			rv = vdds_destroy_niu_node(vnetp->vdds_info.hio_dip,
			    vdds->hio_cookie);
			if (rv == 0) {
				/* use DERR to print by default */
				DERR(vdds, "Successfully destroyed"
				    " Hybrid node");
			} else {
				cmn_err(CE_WARN, "vnet%d:Failed to "
				    "destroy Hybrid node", vnetp->instance);
			}
			/* TODO: send ACK even for failure? */
			DBG2(vdds, "ACK: HIO device destroyed");
			(void) vdds_send_dds_resp_msg(vnetp, dmsg, B_TRUE);
			vdds->hio_dip = 0;
			vdds->hio_cookie = 0;
		}
		break;
	case VNET_DDS_TASK_REL_SHARE:
		DBG2(vdds, "REL_SHARE task...");
		if (vnetp->vdds_info.hio_dip != NULL) {
			vnet_hio_mac_cleanup(vnetp);
			rv = vdds_destroy_niu_node(vnetp->vdds_info.hio_dip,
			    vdds->hio_cookie);
			if (rv == 0) {
				DERR(vdds, "Successfully destroyed "
				    "Hybrid node");
			} else {
				cmn_err(CE_WARN, "vnet%d:Failed to "
				    "destroy HIO node", vnetp->instance);
			}
			/* TODO: failure case */
			(void) vdds_send_dds_rel_msg(vnetp);
			vdds->hio_dip = 0;
			vdds->hio_cookie = 0;
		}
		break;
	default:
		break;
	}
	vdds->task_flags = 0;
	mutex_exit(&vdds->lock);
}

/*
 * vdds_send_dds_rel_msg -- Send a DDS_REL_SHARE message.
 */
static int
vdds_send_dds_rel_msg(vnet_t *vnetp)
{
	vnet_dds_info_t *vdds = &vnetp->vdds_info;
	vio_dds_msg_t	vmsg;
	dds_share_msg_t	*smsg = &vmsg.msg.share_msg;
	int rv;

	DBG1(vdds, "Sending DDS_VNET_REL_SHARE message");
	vmsg.tag.vio_msgtype = VIO_TYPE_CTRL;
	vmsg.tag.vio_subtype = VIO_SUBTYPE_INFO;
	vmsg.tag.vio_subtype_env = VIO_DDS_INFO;
	/* vio_sid filled by the LDC module */
	vmsg.dds_class = DDS_VNET_NIU;
	vmsg.dds_subclass = DDS_VNET_REL_SHARE;
	vmsg.dds_req_id = (++vdds->dds_req_id);
	smsg->macaddr = vnet_macaddr_strtoul(vnetp->curr_macaddr);
	smsg->cookie = vdds->hio_cookie;
	rv = vnet_send_dds_msg(vnetp, &vmsg);
	return (rv);
}

/*
 * vdds_send_dds_resp_msg -- Send a DDS response message.
 */
static int
vdds_send_dds_resp_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg, int ack)
{
	vnet_dds_info_t *vdds = &vnetp->vdds_info;
	int rv;

	DBG1(vdds, "Sending a response message=%d", ack);
	if (ack == B_TRUE) {
		dmsg->tag.vio_subtype = VIO_SUBTYPE_ACK;
		dmsg->msg.share_resp_msg.status = DDS_VNET_SUCCESS;
	} else {
		dmsg->tag.vio_subtype = VIO_SUBTYPE_NACK;
		dmsg->msg.share_resp_msg.status = DDS_VNET_FAIL;
	}
	rv = vnet_send_dds_msg(vnetp, dmsg);
	return (rv);
}

/*
 * vdds_create_niu_node -- Create the NIU Hybrid node. The NIU nexus
 *	node is also created if it doesn't exist already.
 */
dev_info_t *
vdds_create_niu_node(uint64_t cookie, uint64_t macaddr, uint32_t max_frame_size)
{
	dev_info_t *nexus_dip;
	dev_info_t *niu_dip;
	vdds_cb_arg_t cba;

	DBG1(NULL, "Called");

	if (vdds_hv_hio_capable == B_FALSE) {
		return (NULL);
	}
	mutex_enter(&vdds_dev_lock);
	/* Check if the nexus node exists already */
	nexus_dip = vdds_find_node(cookie, ddi_root_node(),
	    vdds_match_niu_nexus);
	if (nexus_dip == NULL) {
		/*
		 * NIU nexus node not found, so create it now.
		 */
		cba.dip = NULL;
		cba.cookie = cookie;
		cba.macaddr = macaddr;
		cba.max_frame_size = max_frame_size;
		nexus_dip = vdds_create_new_node(&cba, NULL,
		    vdds_new_nexus_node);
		if (nexus_dip == NULL) {
			mutex_exit(&vdds_dev_lock);
			return (NULL);
		}
	}
	DBG2(NULL, "nexus_dip = 0x%p", nexus_dip);

	/* Check if NIU node exists already before creating one */
	niu_dip = vdds_find_node(cookie, nexus_dip,
	    vdds_match_niu_node);
	if (niu_dip == NULL) {
		cba.dip = NULL;
		cba.cookie = cookie;
		cba.macaddr = macaddr;
		cba.max_frame_size = max_frame_size;
		niu_dip = vdds_create_new_node(&cba, nexus_dip,
		    vdds_new_niu_node);
		/*
		 * Hold the niu_dip to prevent it from
		 * detaching.
		 */
		if (niu_dip != NULL) {
			e_ddi_hold_devi(niu_dip);
		} else {
			DWARN(NULL, "niumx/network node creation failed");
		}
	} else {
		DWARN(NULL, "niumx/network node already exists(dip=0x%p)",
		    niu_dip);
	}
	/* release the hold that was done in find/create */
	if ((niu_dip != NULL) && (e_ddi_branch_held(niu_dip)))
		e_ddi_branch_rele(niu_dip);
	if (e_ddi_branch_held(nexus_dip))
		e_ddi_branch_rele(nexus_dip);
	mutex_exit(&vdds_dev_lock);
	DBG1(NULL, "returning niu_dip=0x%p", niu_dip);
	return (niu_dip);
}

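/*
 * Note the pairing with vdds_create_niu_node() above: the hold taken there
 * with e_ddi_hold_devi() is dropped here via ddi_release_devi() before the
 * branch is destroyed.
 */
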
/*
 * vdds_destroy_niu_node -- Destroy the NIU node.
 */
int
vdds_destroy_niu_node(dev_info_t *niu_dip, uint64_t cookie)
{
	int rv;
	dev_info_t *fdip = NULL;
	dev_info_t *nexus_dip = ddi_get_parent(niu_dip);


	DBG1(NULL, "Called");
	ASSERT(nexus_dip != NULL);
	mutex_enter(&vdds_dev_lock);

	if (!e_ddi_branch_held(niu_dip))
		e_ddi_branch_hold(niu_dip);
	/*
	 * As we are destroying the node now, release the
	 * hold that was taken during the creation.
	 */
	ddi_release_devi(niu_dip);
	rv = e_ddi_branch_destroy(niu_dip, &fdip, 0);
	if (rv != 0) {
		DERR(NULL, "Failed to destroy niumx/network node dip=0x%p",
		    niu_dip);
		if (fdip != NULL) {
			ddi_release_devi(fdip);
		}
		rv = EBUSY;
		goto dest_exit;
	}
	/*
	 * Cleanup the parent's ranges property set
	 * for this Hybrid device.
	 */
	vdds_release_range_prop(nexus_dip, cookie);

dest_exit:
	mutex_exit(&vdds_dev_lock);
	DBG1(NULL, "returning rv=%d", rv);
	return (rv);
}

/*
 * vdds_match_niu_nexus -- callback function to verify a node is the
 *	NIU nexus node.
 */
static int
vdds_match_niu_nexus(dev_info_t *dip, void *arg)
{
	vdds_cb_arg_t	*warg = (vdds_cb_arg_t *)arg;
692678453a8Sspeer 	vdds_reg_t	*reg_p;
693678453a8Sspeer&n