xref: /illumos-gate/usr/src/uts/common/io/ib/clients/of/sol_ofs/sol_cma.c (revision c39526b769298791ff5b0b6c5e761f49aabaeb4e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * sol_cma is a part of sol_ofs misc module. This file
28  * provides interfaces for supporting the communication
29  * management API defined in "rdma_cm.h". In-Kernel
30  * consumers of the "rdma_cm.h" API should link sol_ofs
31  * misc module using :
32  *	-N misc/sol_ofs
33  * Solaris uCMA (sol_ucma) driver is the current consumer for
34  * sol_cma.
35  */
36 
37 /* Standard driver includes */
38 #include <sys/types.h>
39 #include <sys/modctl.h>
40 #include <sys/errno.h>
41 #include <sys/stat.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/modctl.h>
45 
46 #include <sys/ib/clients/of/sol_ofs/sol_ofs_common.h>
47 #include <sys/ib/clients/of/ofed_kernel.h>
48 #include <sys/ib/clients/of/rdma/ib_addr.h>
49 #include <sys/ib/clients/of/rdma/rdma_cm.h>
50 
51 #include <sys/ib/clients/of/sol_ofs/sol_cma.h>
52 
53 /* Modload support */
/* sol_ofs is a misc module: no dev_ops, loaded by dependent drivers. */
static struct modlmisc sol_ofs_modmisc	= {
	&mod_miscops,
	"Solaris OFS Misc module"
};

/* Single-linkage module definition used by _init/_fini/_info below. */
struct modlinkage sol_ofs_modlinkage = {
	MODREV_1,
	(void *)&sol_ofs_modmisc,
	NULL
};
64 
/* IBTF async event handler, registered via ibt_attach() in _init(). */
static void sol_ofs_ibt_async_hdlr(void *clnt, ibt_hca_hdl_t hdl,
    ibt_async_code_t code, ibt_async_event_t *event);

/* IBTF client registration information for this module. */
static ibt_clnt_modinfo_t sol_ofs_ibt_modinfo  = {
	IBTI_V_CURR,
	IBT_GENERIC_MISC,
	sol_ofs_ibt_async_hdlr,
	NULL,
	"sol_ofs"
};
75 
ibt_clnt_hdl_t		sol_ofs_ibt_hdl;	/* IBTF client handle */
sol_cma_glbl_listen_t	sol_cma_glbl_listen;
avl_tree_t		sol_cma_glbl_listen_tree; /* global listen CMIDs */

static void		sol_cma_add_dev(struct ib_device *);
static void		sol_cma_rem_dev(struct ib_device *);

/* List of cma_device_t entries, one per attached HCA. */
static llist_head_t	sol_cma_dev_list = LLIST_HEAD_INIT(sol_cma_dev_list);
kmutex_t		sol_cma_dev_mutex;	/* protects sol_cma_dev_list */
kmutex_t		sol_cma_glob_mutex;

/* Debug-log prefixes used with SOL_OFS_DPRINTF_*() */
char	*sol_rdmacm_dbg_str = "sol_rdmacm";
char	*sol_ofs_dbg_str = "sol_ofs_mod";
89 
90 /*
91  * Local functions defines.
92  */
93 int sol_cma_req_cmid_cmp(const void *p1, const void *p2);
94 int sol_cma_cmid_cmp(const void *p1, const void *p2);
95 int sol_cma_svc_cmp(const void *, const void *);
96 
97 static struct rdma_cm_id *cma_alloc_chan(rdma_cm_event_handler,
98     void *, enum rdma_port_space);
99 static void cma_set_chan_state(sol_cma_chan_t *, cma_chan_state_t);
100 static int cma_cas_chan_state(sol_cma_chan_t *, cma_chan_state_t,
101     cma_chan_state_t);
102 static void cma_free_listen_list(struct rdma_cm_id *);
103 static void cma_destroy_id(struct rdma_cm_id *);
104 static void cma_handle_nomore_events(sol_cma_chan_t *);
105 
106 extern void sol_ofs_dprintf_init();
107 extern void sol_ofs_dprintf_fini();
108 
109 static void ibcma_init_rdma_devs();
110 cma_chan_state_t cma_get_chan_state(sol_cma_chan_t *);
111 extern int ibcma_init_root_chan(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
112 extern int ibcma_fini_root_chan(sol_cma_chan_t *);
113 extern void ibcma_copy_srv_hdl(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
114 extern int ibcma_fini_ep_chan(sol_cma_chan_t *);
115 extern uint64_t ibcma_init_root_sid(sol_cma_chan_t *);
116 extern void rdma_ib_destroy_id(struct rdma_cm_id *);
117 extern int rdma_ib_bind_addr(struct rdma_cm_id *, struct sockaddr *);
118 extern int rdma_ib_resolve_addr(struct rdma_cm_id *, struct sockaddr *,
119     struct sockaddr *, int);
120 extern int rdma_ib_resolve_route(struct rdma_cm_id *, int);
121 extern int rdma_ib_init_qp_attr(struct rdma_cm_id *, struct ib_qp_attr *,
122     int *);
123 extern int rdma_ib_connect(struct rdma_cm_id *, struct rdma_conn_param *);
124 extern int rdma_ib_listen(struct rdma_cm_id *, int);
125 extern int rdma_ib_accept(struct rdma_cm_id *, struct rdma_conn_param *);
126 extern int rdma_ib_reject(struct rdma_cm_id *, const void *, uint8_t);
127 extern int rdma_ib_disconnect(struct rdma_cm_id *);
128 extern int rdma_ib_join_multicast(struct rdma_cm_id *, struct sockaddr *,
129     void *);
130 extern void rdma_ib_leave_multicast(struct rdma_cm_id *, struct sockaddr *);
131 
132 int
133 _init(void)
134 {
135 	int		err;
136 	ibt_status_t	status;
137 
138 	sol_ofs_dprintf_init();
139 	SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init()");
140 
141 	mutex_init(&sol_cma_glob_mutex, NULL, MUTEX_DRIVER, NULL);
142 	mutex_init(&sol_cma_dev_mutex, NULL, MUTEX_DRIVER, NULL);
143 	avl_create(&sol_cma_glbl_listen_tree,
144 	    sol_cma_svc_cmp, sizeof (sol_cma_glbl_listen_t),
145 	    offsetof(sol_cma_glbl_listen_t, cma_listen_node));
146 
147 
148 	if ((status = ibt_attach(&sol_ofs_ibt_modinfo, NULL, NULL,
149 	    &sol_ofs_ibt_hdl)) != IBT_SUCCESS) {
150 		cmn_err(CE_WARN, "_init: ibt_attach failed");
151 		SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
152 		    "_init() ibt_attach() failed with status %d",
153 		    status);
154 		avl_destroy(&sol_cma_glbl_listen_tree);
155 		mutex_destroy(&sol_cma_dev_mutex);
156 		mutex_destroy(&sol_cma_glob_mutex);
157 		sol_ofs_dprintf_fini();
158 		return (ENODEV);
159 	}
160 
161 	if ((err = mod_install(&sol_ofs_modlinkage)) != 0) {
162 		SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "_init() failed");
163 		(void) ibt_detach(sol_ofs_ibt_hdl);
164 		avl_destroy(&sol_cma_glbl_listen_tree);
165 		mutex_destroy(&sol_cma_dev_mutex);
166 		mutex_destroy(&sol_cma_glob_mutex);
167 		sol_ofs_dprintf_fini();
168 		return (err);
169 	}
170 
171 	ibcma_init_rdma_devs();
172 
173 	SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init() - ret");
174 	return (err);
175 }
176 
/*
 * _fini(9E): unload the module.  Returns EBUSY while listen CMIDs are
 * still active or while mod_remove() reports the module in use; only
 * after a successful mod_remove() is the IBTF handle detached and the
 * global state destroyed.
 */
int
_fini(void)
{
	int		err;

	SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini()");

	/*
	 * NOTE(review): avl_numnodes() is read without a lock here;
	 * presumably safe because no new listens can start during
	 * unload -- confirm against callers.
	 */
	if (avl_numnodes(&sol_cma_glbl_listen_tree)) {
		SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "_fini - "
		    "listen CMIDs still active");
		return (EBUSY);
	}
	if ((err = mod_remove(&sol_ofs_modlinkage)) != 0) {
		SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
		    "_fini: mod_remove failed");
		return (err);
	}
	/* Teardown mirrors _init() in reverse order. */
	(void) ibt_detach(sol_ofs_ibt_hdl);
	avl_destroy(&sol_cma_glbl_listen_tree);
	mutex_destroy(&sol_cma_dev_mutex);
	mutex_destroy(&sol_cma_glob_mutex);
	SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini() - ret");
	sol_ofs_dprintf_fini();
	return (err);
}
202 
/*
 * _info(9E): report module information from the modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&sol_ofs_modlinkage, modinfop));
}
208 
/*
 * Per-HCA bookkeeping entry; one per attached ib_device, linked on
 * sol_cma_dev_list and pointed to by ib_device->data.
 */
typedef struct cma_device {
	kmutex_t		cma_mutex;	/* guards the fields below */
	/* Linkage on the global sol_cma_dev_list */
	llist_head_t		cma_list;
	/* Endpoint channels bound to this device */
	genlist_t		cma_epchan_list;
	struct ib_device	*cma_device;
	uint_t			cma_ref_count;	/* active acquires */
	enum {
		SOL_CMA_DEV_ADDED,
		SOL_CMA_DEV_REM_IN_PROGRESS
	} cma_dev_state;
} cma_device_t;
220 
221 static void
222 sol_cma_add_dev(struct ib_device *dev)
223 {
224 	cma_device_t	*new_device;
225 
226 	new_device = kmem_zalloc(sizeof (cma_device_t), KM_NOSLEEP);
227 	if (!new_device) {
228 		SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_add_dev() "
229 		    "alloc failed!!");
230 		return;
231 	}
232 	mutex_init(&new_device->cma_mutex, NULL, MUTEX_DRIVER, NULL);
233 	llist_head_init(&new_device->cma_list, new_device);
234 	init_genlist(&new_device->cma_epchan_list);
235 	new_device->cma_device = dev;
236 
237 	dev->data = new_device;
238 
239 	mutex_enter(&sol_cma_dev_mutex);
240 	llist_add_tail(&new_device->cma_list, &sol_cma_dev_list);
241 	mutex_exit(&sol_cma_dev_mutex);
242 }
243 
244 static void
245 sol_cma_rem_dev(struct ib_device *dev)
246 {
247 	cma_device_t	*rem_device;
248 	genlist_entry_t	*entry;
249 
250 	rem_device = (cma_device_t *)dev->data;
251 	if (!rem_device) {
252 		SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_rem_dev() "
253 		    "NULL cma_dev!!");
254 		return;
255 	}
256 
257 	mutex_enter(&rem_device->cma_mutex);
258 	rem_device->cma_dev_state = SOL_CMA_DEV_REM_IN_PROGRESS;
259 	if (rem_device->cma_ref_count) {
260 		mutex_exit(&rem_device->cma_mutex);
261 		SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str, "sol_cma_rem_dev() "
262 		    "BUSY cma_dev!!");
263 		return;
264 	}
265 	entry = remove_genlist_head(&rem_device->cma_epchan_list);
266 	while (entry) {
267 		sol_cma_chan_t	*ep_chanp;
268 
269 		ep_chanp = (sol_cma_chan_t *)entry->data;
270 		if (ibcma_fini_ep_chan(ep_chanp) == 0) {
271 			genlist_entry_t	*entry1;
272 			sol_cma_chan_t	*root_chanp;
273 
274 			ASSERT(ep_chanp->chan_listenp);
275 			entry1 = ep_chanp->chan_listenp->listen_ep_root_entry;
276 			root_chanp = (sol_cma_chan_t *)ep_chanp->listen_root;
277 			root_chanp->chan_listenp->listen_eps--;
278 			delete_genlist(&root_chanp->chan_listenp->listen_list,
279 			    entry1);
280 
281 			kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
282 			kmem_free(entry, sizeof (genlist_entry_t));
283 		}
284 
285 		entry = remove_genlist_head(&rem_device->cma_epchan_list);
286 	}
287 	mutex_exit(&rem_device->cma_mutex);
288 
289 	mutex_enter(&sol_cma_dev_mutex);
290 	llist_del(&rem_device->cma_list);
291 	mutex_exit(&sol_cma_dev_mutex);
292 
293 	kmem_free(rem_device, sizeof (cma_device_t));
294 }
295 
/*
 * Look up the ib_device with the given node GUID and take a reference
 * on it.  Returns NULL if no matching device exists or if the device
 * is being removed (SOL_CMA_DEV_REM_IN_PROGRESS).  The reference is
 * dropped via sol_cma_release_device().
 *
 * Lock order: sol_cma_dev_mutex is held across the walk, the
 * per-device cma_mutex is taken inside it.
 */
struct ib_device *
sol_cma_acquire_device(ib_guid_t hca_guid)
{
	llist_head_t	*entry;
	cma_device_t	*cma_devp;

	mutex_enter(&sol_cma_dev_mutex);
	list_for_each(entry, &sol_cma_dev_list) {
		cma_devp = (cma_device_t *)entry->ptr;

		if (cma_devp->cma_device->node_guid != hca_guid)
			continue;

		mutex_enter(&cma_devp->cma_mutex);
		if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS) {
			SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
			    "sol_cma_acquire_dev() - Device getting removed!!");
			mutex_exit(&cma_devp->cma_mutex);
			mutex_exit(&sol_cma_dev_mutex);
			return (NULL);
		}
		cma_devp->cma_ref_count++;
		mutex_exit(&cma_devp->cma_mutex);
		mutex_exit(&sol_cma_dev_mutex);
		return (cma_devp->cma_device);

	}
	mutex_exit(&sol_cma_dev_mutex);
	return (NULL);
}
326 
327 static void
328 sol_cma_release_device(struct rdma_cm_id *id)
329 {
330 	ib_device_t	*device = id->device;
331 	llist_head_t	*entry;
332 	cma_device_t	*cma_devp;
333 
334 	mutex_enter(&sol_cma_dev_mutex);
335 	list_for_each(entry, &sol_cma_dev_list) {
336 		cma_devp = (cma_device_t *)entry->ptr;
337 
338 		if (cma_devp->cma_device != device)
339 			continue;
340 
341 		mutex_enter(&cma_devp->cma_mutex);
342 		cma_devp->cma_ref_count--;
343 		if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS &&
344 		    cma_devp->cma_ref_count == 0) {
345 			SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
346 			    "sol_cma_release_dev() - Device free removed!!");
347 			mutex_exit(&cma_devp->cma_mutex);
348 			llist_del(&cma_devp->cma_list);
349 			kmem_free(cma_devp, sizeof (cma_device_t));
350 			mutex_exit(&sol_cma_dev_mutex);
351 			return;
352 		}
353 		mutex_exit(&cma_devp->cma_mutex);
354 	}
355 	mutex_exit(&sol_cma_dev_mutex);
356 }
357 
/*
 * Record that the endpoint channel ep_chanp is bound to the HCA with
 * node GUID hca_guid: add it to that device's cma_epchan_list and
 * remember the device in the channel's listen info.  Logs an error if
 * no matching HCA is on the global list.
 */
void
sol_cma_add_hca_list(sol_cma_chan_t *ep_chanp, ib_guid_t hca_guid)
{
	llist_head_t	*entry;
	cma_device_t	*cma_devp;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx)",
	    ep_chanp, hca_guid);
	mutex_enter(&sol_cma_dev_mutex);
	list_for_each(entry, &sol_cma_dev_list) {
		cma_devp = (cma_device_t *)entry->ptr;

		if ((cma_devp->cma_device)->node_guid != hca_guid)
			continue;

		/* Matching HCA found; link the EP channel to it. */
		mutex_enter(&cma_devp->cma_mutex);
		ep_chanp->chan_listenp->listen_ep_dev_entry =
		    add_genlist(&cma_devp->cma_epchan_list,
		    (uintptr_t)ep_chanp, NULL);
		ep_chanp->chan_listenp->listen_ep_device = cma_devp->cma_device;
		mutex_exit(&cma_devp->cma_mutex);
		mutex_exit(&sol_cma_dev_mutex);
		return;
	}
	mutex_exit(&sol_cma_dev_mutex);
	SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx): "
	    "No matching HCA in list!!", ep_chanp, hca_guid);
}
386 
/*ARGSUSED*/
/*
 * IBTF async event handler.  Tracks HCA hotplug: on attach a fresh
 * ib_device is allocated and added to the global device list; on
 * detach the matching device is removed.  All other async codes are
 * intentionally ignored.
 *
 * NOTE(review): the ib_device allocated on attach is not visibly
 * freed by sol_cma_rem_dev() (which frees only the cma_device_t) --
 * confirm its lifetime against the rest of the module.
 */
static void
sol_ofs_ibt_async_hdlr(void *clnt, ibt_hca_hdl_t hdl,
    ibt_async_code_t code, ibt_async_event_t *event)
{
	struct ib_device *device;
	llist_head_t	*entry;
	cma_device_t	*cma_devp;

	SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
	    "ibt_async_hdlr(%p, %p, %x, %p)",
	    clnt, hdl, code, event);

	switch (code) {
	case IBT_HCA_ATTACH_EVENT:
		device = kmem_zalloc(sizeof (struct ib_device),
		    KM_SLEEP);
		device->node_guid = htonll(event->ev_hca_guid);
		sol_cma_add_dev(device);
		break;
	case IBT_HCA_DETACH_EVENT:
		mutex_enter(&sol_cma_dev_mutex);
		list_for_each(entry, &sol_cma_dev_list) {
			cma_devp = (cma_device_t *)entry->ptr;

			if (cma_devp->cma_device->node_guid ==
			    htonll(event->ev_hca_guid)) {
				/* Drop the list lock across the removal. */
				mutex_exit(&sol_cma_dev_mutex);
				sol_cma_rem_dev(cma_devp->cma_device);
				mutex_enter(&sol_cma_dev_mutex);
				break;
			}
		}
		mutex_exit(&sol_cma_dev_mutex);

		break;
	}
}
425 
426 /*
427  * rdma_cm.h API functions.
428  */
429 struct rdma_cm_id *
430 rdma_create_id(rdma_cm_event_handler evt_hdlr, void *context,
431     enum rdma_port_space ps)
432 {
433 	struct rdma_cm_id 	*rdma_idp;
434 
435 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_create_id(%p, %p, %x)",
436 	    evt_hdlr, context, ps);
437 
438 	if (ps != RDMA_PS_TCP && ps != RDMA_PS_UDP && ps != RDMA_PS_IPOIB) {
439 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
440 		    "rdma_create_id: unsupported protocol %x", ps);
441 		return (NULL);
442 	}
443 
444 	rdma_idp = cma_alloc_chan(evt_hdlr, context, ps);
445 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
446 	    "rdma_create_id : ret %p", rdma_idp);
447 
448 	return (rdma_idp);
449 }
450 
451 void
452 rdma_map_id2clnthdl(struct rdma_cm_id *rdma_idp, void *ib_client_hdl,
453     void *iw_client_hdl)
454 {
455 	sol_cma_chan_t	*chanp = (sol_cma_chan_t *)rdma_idp;
456 
457 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
458 	    "rdma_map_id2clnthdl(%p, %p, %p)",
459 	    rdma_idp, ib_client_hdl, iw_client_hdl);
460 	ASSERT(ib_client_hdl != NULL || iw_client_hdl != NULL);
461 	chanp->chan_ib_client_hdl = ib_client_hdl;
462 	chanp->chan_iw_client_hdl = iw_client_hdl;
463 }
464 
465 void
466 rdma_map_id2qphdl(struct rdma_cm_id *rdma_idp, void *qp_hdl)
467 {
468 	sol_cma_chan_t	*chanp = (sol_cma_chan_t *)rdma_idp;
469 
470 	ASSERT(rdma_idp);
471 	ASSERT(qp_hdl);
472 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_mapid2qphdl(%p, %p)",
473 	    rdma_idp, qp_hdl);
474 	chanp->chan_qp_hdl = qp_hdl;
475 }
476 
/*
 * rdma_destroy_id - destroy a CM ID.
 *
 * Marks the CMID destroyed, waits out any in-flight event callback,
 * releases the device reference, then handles the three CMID roles:
 *  - root (listening) CMIDs: reject/tear down un-notified REQ CMIDs
 *    and wait until all REQ CMIDs are gone before freeing;
 *  - active CMIDs: disconnect if still connected, then either free
 *    immediately or defer the free to the DISCONNECT notification;
 *  - passive CMIDs: skip the active-side handling entirely.
 */
void
rdma_destroy_id(struct rdma_cm_id *rdma_idp)
{
	sol_cma_chan_t		*chanp, *root_chanp;
	cma_chan_state_t	state;
	int			rc, is_root_cmid, do_wait, is_passive;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p)", rdma_idp);

	if (!rdma_idp)
		return;

	is_root_cmid = do_wait = is_passive = 0;

	chanp = (sol_cma_chan_t *)rdma_idp;
	root_chanp = (sol_cma_chan_t *)chanp->listen_root;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p), %p",
	    rdma_idp, root_chanp);

	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state = SOL_CMA_CALLER_CMID_DESTROYED;
	/* Wait while an event is being notified to the consumer */
	while (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_EVENT_PROGRESS)
		cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);

	if (rdma_idp->device)
		sol_cma_release_device(rdma_idp);

	/* Classify this CMID: root listener, passive EP, or active. */
	if (chanp->chan_listenp && chanp->chan_listenp->listen_is_root)
		is_root_cmid = 1;
	if (root_chanp == NULL && is_root_cmid == 0)
		is_passive = 1;

	/*
	 * Skip Active side handling for passive CMIDs and listen CMID
	 * for which REQ CMIDs have not been created.
	 */
	if (is_passive || (is_root_cmid && chanp->chan_req_state !=
	    REQ_CMID_CREATED)) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
		    "Skipping passive %p, %x, %x", chanp->chan_listenp,
		    is_root_cmid, chanp->chan_req_state);
		goto skip_passive_handling;
	}

	/*
	 * destroy_id() called for listening CMID and there are REQ
	 * CMIDs not yet notified. Reject such CMIDs and decrement
	 * the count.
	 */
	if (is_root_cmid && chanp->chan_req_cnt) {
		sol_cma_chan_t	*req_cmid_chan, *next_chan;

		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
		    "not notified handling");
		for (req_cmid_chan = (sol_cma_chan_t *)avl_first(
		    &chanp->chan_req_avl_tree); req_cmid_chan &&
		    chanp->chan_req_cnt; req_cmid_chan = next_chan) {
			next_chan = AVL_NEXT(
			    &chanp->chan_req_avl_tree, req_cmid_chan);
			if (req_cmid_chan->chan_req_state ==
			    REQ_CMID_CREATED) {
				avl_remove(&chanp->chan_req_avl_tree,
				    req_cmid_chan);
				chanp->chan_req_cnt--;
				chanp->chan_req_total_cnt--;
				/* Drop our lock across the disconnect. */
				mutex_exit(&chanp->chan_mutex);
				(void) rdma_disconnect(
				    (struct rdma_cm_id *)req_cmid_chan);
				mutex_enter(&chanp->chan_mutex);
				if (rdma_idp->ps == RDMA_PS_TCP) {
					/* TCP: free deferred to DISCONNECT */
					mutex_enter(
					    &req_cmid_chan->chan_mutex);
					req_cmid_chan->listen_root =
					    rdma_idp;
					cma_set_chan_state(req_cmid_chan,
					    SOL_CMA_CHAN_DESTROY_PENDING);
					mutex_exit(
					    &req_cmid_chan->chan_mutex);
				} else {
					/* UDP: no callbacks pending, free */
					mutex_destroy(
					    &req_cmid_chan->chan_mutex);
					cv_destroy(
					    &req_cmid_chan->chan_destroy_cv);
					kmem_free(req_cmid_chan,
					    sizeof (sol_cma_chan_t));
				}
			}
		}
	}

	/*
	 * destroy_id() called for :
	 * 	listening CMID and all REQ CMIDs destroy_id() called
	 *	REQ CMID and 1 more REQ CMID not yet destroyed.
	 * wait till the CMID is completely destroyed.
	 */
	if (is_root_cmid && chanp->chan_req_total_cnt == 0) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
		    "root idp waiting");
		cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_WAIT);
		cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
	}

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
	    "root_idp %p, cnt %x, state %x", root_chanp,
	    root_chanp ? root_chanp->chan_req_total_cnt : 0,
	    root_chanp ? cma_get_chan_state(root_chanp) : 0);

	if (root_chanp && root_chanp->chan_req_total_cnt == 1 &&
	    cma_get_chan_state(root_chanp) == SOL_CMA_CHAN_DESTROY_PENDING)
		do_wait = 1;

skip_passive_handling :
	state = cma_get_chan_state(chanp);
	if (is_root_cmid == 0 && state != SOL_CMA_CHAN_DISCONNECT &&
	    SOL_CMA_DISCONNECT_OK(chanp)) {
		/*
		 * A connected CM ID has not been disconnected.
		 * Call rdma_disconnect() to disconnect it.
		 */
		mutex_exit(&chanp->chan_mutex);
		rc = rdma_disconnect(rdma_idp);
		if (rc) {
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "rdma_destroy_id(%p)- disconnect failed!!",
			    rdma_idp);
			return;
		}
		mutex_enter(&chanp->chan_mutex);
		if (root_chanp && chanp->listen_root == NULL)
			chanp->listen_root = (struct rdma_cm_id *)root_chanp;
		mutex_exit(&chanp->chan_mutex);
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "rdma_destroy_id(chanp %p, connect %x, ps %x)",
		    chanp, chanp->chan_connect_flag, rdma_idp->ps);
		if ((SOL_CMAID_IS_CONNECTED(chanp) &&
		    rdma_idp->ps == RDMA_PS_TCP) ||
		    (IS_UDP_CMID(rdma_idp) &&
		    chanp->chan_connect_flag == SOL_CMA_CONNECT_INITIATED)) {
			if (do_wait) {
				mutex_enter(&chanp->chan_mutex);
				cma_set_chan_state(chanp,
				    SOL_CMA_CHAN_DESTROY_WAIT);
				cv_wait(&chanp->chan_destroy_cv,
				    &chanp->chan_mutex);
				mutex_exit(&chanp->chan_mutex);
				cma_destroy_id(rdma_idp);
			} else {
				mutex_enter(&chanp->chan_mutex);
				cma_set_chan_state(chanp,
				    SOL_CMA_CHAN_DESTROY_PENDING);
				mutex_exit(&chanp->chan_mutex);
			}
		} else {
			/*
			 * Disconnected a CMID for which CONNECT has been
			 * Initiated but not complete.
			 * No more callbacks are expected for this CMID.
			 * Free this CMID.
			 */
			cma_destroy_id(rdma_idp);
		}
	} else if (is_root_cmid == 0 && state ==
	    SOL_CMA_CHAN_DISCONNECT && SOL_CMAID_IS_CONNECTED(chanp)) {
		/*
		 * CM ID was connected and disconnect is in process.
		 * Free of this CM ID is done on the DISCONNECT
		 * notification for this CMID.
		 */
		cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
		mutex_exit(&chanp->chan_mutex);
	} else if (state != SOL_CMA_CHAN_DESTROY_PENDING) {
		/* CM ID, not connected, just free it. */
		mutex_exit(&chanp->chan_mutex);
		cma_destroy_id(rdma_idp);
	} else
		mutex_exit(&chanp->chan_mutex);

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: ret");
}
658 
659 /*
660  * State transitions for Address resolution :
661  *	Active Side (Client) :
662  *	1. CREATE_ID-->BIND_ADDR-->RESOLVE_ADDR-->RESOLVE_ROUTE
663  *
664  *	Passive Side (Server) :
665  *	2. CREATE_ID-->RESOLVE_ADDR-->RESOLVE_ROUTE
666  *	IF_ADDR_ANY can be passed as local address in RESOLVE_ADDR
667  */
/*
 * rdma_bind_addr - bind a CM ID to a local address.
 *
 * Transitions the channel IDLE -> BOUND, copies the address into the
 * rdma_cm_id, then tries the IB transport first and (when built with
 * IWARP_SUPPORT) iWARP second.  On failure the channel is reset to
 * IDLE and EINVAL is returned.
 */
int
rdma_bind_addr(struct rdma_cm_id *idp, struct sockaddr *addr)
{
	sol_cma_chan_t		*chanp;
	struct rdma_addr	*addrp;
	int			ret;

	ASSERT(idp);
	ASSERT(addr);
	chanp = (sol_cma_chan_t *)idp;
	addrp = &(idp->route.addr);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_bind_addr(%p, %p)",
	    idp, addr);

	mutex_enter(&chanp->chan_mutex);
	ret = cma_cas_chan_state(chanp, SOL_CMA_CHAN_IDLE, SOL_CMA_CHAN_BOUND);
	if (ret) {
		mutex_exit(&chanp->chan_mutex);
		return (ret);
	}
	/* Copy the local address to rdma_id structure */
	bcopy((void *)addr, (void *)&(addrp->src_addr),
	    sizeof (struct sockaddr));
	mutex_exit(&chanp->chan_mutex);

	/*
	 * First call rdma_ib_bind_addr() to bind this address.
	 * Next call rdma_iw_bind_addr() to bind this address.
	 * For IF_ADDR_ANY, IB address is given priority over
	 * iWARP.
	 */
	/*
	 * NOTE(review): chan_ib_client_hdl is read and assigned here
	 * without holding chan_mutex -- confirm this is safe at this
	 * point of the channel lifecycle.
	 */
	if (chanp->chan_ib_client_hdl == NULL) {
		chanp->chan_ib_client_hdl = sol_ofs_ibt_hdl;
	}
	if (chanp->chan_ib_client_hdl && rdma_ib_bind_addr(idp, addr) == 0) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "rdma_bind_addr: ret IB @");
		return (0);
#ifdef	IWARP_SUPPORT
	} else if (chanp->chan_iw_client_hdl && rdma_iw_bind_addr(idp, addr)
	    == 0) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "rdma_bind_addr: ret iWARP @");
		return (0);
#endif	/* IWARP_SUPPORT */
	}

	/* Neither transport accepted the address; revert to IDLE. */
	mutex_enter(&chanp->chan_mutex);
	cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
	mutex_exit(&chanp->chan_mutex);
	SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str, "rdma_bind_addr: ret failure!");
	return (EINVAL);
}
721 
/*
 * rdma_resolve_addr - resolve source/destination addresses for a CM ID.
 *
 * Tries the IB transport first, then (with IWARP_SUPPORT) iWARP.  The
 * outcome is reported to the consumer asynchronously via an
 * RDMA_CM_EVENT_ADDR_RESOLVED or RDMA_CM_EVENT_ADDR_ERROR event; the
 * function itself always returns 0.
 */
int
rdma_resolve_addr(struct rdma_cm_id *idp, struct sockaddr *src_addr,
    struct sockaddr *dst_addr, int timeout_ms)
{
	sol_cma_chan_t		*chanp;
	struct rdma_addr	*addrp;
	cma_chan_state_t	state;
	enum rdma_cm_event_type	event;
	int			rc = 0;

	ASSERT(idp);
	chanp = (sol_cma_chan_t *)idp;
	addrp = &(idp->route.addr);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr(%p, %p, "
	    "%p, %x)", idp, src_addr, dst_addr, timeout_ms);

	mutex_enter(&chanp->chan_mutex);
	state = cma_get_chan_state(chanp);
	if (state != SOL_CMA_CHAN_IDLE && state != SOL_CMA_CHAN_BOUND) {
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_resolve_addr : invalid chan state %x", state);
		rc = EINVAL;
		mutex_exit(&chanp->chan_mutex);
		goto resolve_exit;
	}
	/* Source address is only taken if no transport bound it yet. */
	if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
		bcopy((void *)src_addr, (void *)&(addrp->src_addr),
		    sizeof (struct sockaddr));
	}
	bcopy((void *)dst_addr, (void *)&(addrp->dst_addr),
	    sizeof (struct sockaddr));
	mutex_exit(&chanp->chan_mutex);

	/*
	 * First resolve this as an @ corresponding to IB fabric
	 * if this fails, resolve this as an @ corresponding to iWARP
	 */
	if (chanp->chan_ib_client_hdl == NULL) {
		chanp->chan_ib_client_hdl = sol_ofs_ibt_hdl;
	}
	if (chanp->chan_ib_client_hdl && rdma_ib_resolve_addr(idp, src_addr,
	    dst_addr, timeout_ms) == 0) {
		SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
		    "rdma_resolve_addr: ret IB @");
		goto resolve_exit;
#ifdef IWARP_SUPPORT
	} else if (chanp->chan_iw_client_hdl && rdma_iw_resolve_addr(idp,
	    src_addr, dst_addr, timeout_ms) == 0) {
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_resolve_addr: ret iWARP @");
		goto resolve_exit;
#endif	/* IWARP_SUPPORT */
	} else {
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_resolve_addr: Invalid @");
		rc = EINVAL;
	}

resolve_exit:
	if (rc == 0) {
		mutex_enter(&chanp->chan_mutex);
		cma_set_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD);
		mutex_exit(&chanp->chan_mutex);
		event = RDMA_CM_EVENT_ADDR_RESOLVED;
	} else
		event = RDMA_CM_EVENT_ADDR_ERROR;

	/*
	 * Generate RDMA_CM_EVENT_ADDR_RESOLVED event
	 * This will result in RDMA_USER_CM_CMD_RESOLVE_ROUTE in
	 * userland.
	 */
	cma_generate_event(idp, event, 0, NULL, NULL);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr: ret 0");
	return (0);
}
798 
799 int
800 rdma_resolve_route(struct rdma_cm_id *idp, int timeout_ms)
801 {
802 	sol_cma_chan_t		*chanp;
803 
804 	ASSERT(idp);
805 	chanp = (sol_cma_chan_t *)idp;
806 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route(%p, %x)", idp,
807 	    timeout_ms);
808 
809 	mutex_enter(&chanp->chan_mutex);
810 	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD,
811 	    SOL_CMA_CHAN_ROUTE_RESLVD) != 0) {
812 		mutex_exit(&chanp->chan_mutex);
813 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
814 		    "resolve_route: Invalid state");
815 		return (EINVAL);
816 	}
817 	mutex_exit(&chanp->chan_mutex);
818 
819 	/*
820 	 * Generate RDMA_CM_EVENT_ROUTE_RESOLVED event
821 	 * This will result in RDMA_USER_CM_CMD_RESOLVE_ROUTE in
822 	 * userland
823 	 */
824 	cma_generate_event(idp, RDMA_CM_EVENT_ROUTE_RESOLVED, 0,
825 	    NULL, NULL);
826 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route: ret 0");
827 	return (0);
828 }
829 
830 /*
831  * Connect or Listen request should be send after Route is resolved
832  *
833  *	Active Side (Client) :
834  *	1. (State ROUTE_RESOLVED)-->CONNECT-->ACCEPT/REJECT-->DISCONNECT
835  *	       -->DESTROY_ID-->close(9E)
836  *	2. Same as (1), DESTROY_ID without DISCONNECT
837  *	3. Same as (1), close(9e) without DESTROY_ID.
838  *
839  *	Passive Side (Server) :
840  *	4. (State ROUTE_RESOLVED)-->LISTEN->DISCONNECT
841  *		-->DESTROY_ID-->close(9E)
842  *	5. Same as (4), DESTROY_ID without DISCONNECT
843  *	6. Same as (4), close(9e) without DESTROY_ID.
844  */
/*
 * rdma_connect - initiate a connection on a CM ID.
 *
 * Requires a bound transport and the ROUTE_RESLVD state; moves the
 * channel to CONNECT and dispatches to the IB (or, with
 * IWARP_SUPPORT, iWARP) transport.  Returns EINVAL on a bad transport
 * or state, otherwise the transport's return value.
 */
int
rdma_connect(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
{
	sol_cma_chan_t		*chanp;
	int			ret = EINVAL;

	ASSERT(idp);
	chanp = (sol_cma_chan_t *)idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect(%p, %p)", idp,
	    conn_param);

	mutex_enter(&chanp->chan_mutex);
	if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
		mutex_exit(&chanp->chan_mutex);
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_connect, Invalid Xport");
		return (EINVAL);
	}
	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
	    SOL_CMA_CHAN_CONNECT)) {
		mutex_exit(&chanp->chan_mutex);
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_connect, Invalid state");
		return (EINVAL);
	}

	/*
	 * NOTE(review): the transport connect is invoked with
	 * chan_mutex held -- presumably rdma_ib_connect() does not
	 * reacquire it; confirm against its implementation.
	 */
	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
		ret = rdma_ib_connect(idp, conn_param);
#ifdef	IWARP_SUPPORT
	} else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
		ret = rdma_iw_connect(idp, conn_param);
#endif	/* IWARP_SUPPORT */
	}
	mutex_exit(&chanp->chan_mutex);

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect: ret %x", ret);
	return (ret);
}
883 
884 static int cma_init_listen_root(sol_cma_chan_t *);
885 static void cma_fini_listen_root(sol_cma_chan_t *);
886 
887 int
888 rdma_listen(struct rdma_cm_id *idp, int bklog)
889 {
890 	sol_cma_chan_t		*chanp;
891 	int			ret;
892 	genlist_entry_t		*entry;
893 	cma_chan_state_t	state;
894 
895 	ASSERT(idp);
896 	chanp = (sol_cma_chan_t *)idp;
897 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen(%p, %x)",
898 	    idp, bklog);
899 
900 	mutex_enter(&chanp->chan_mutex);
901 	state = cma_get_chan_state(chanp);
902 	if (state == SOL_CMA_CHAN_IDLE) {
903 		mutex_exit(&chanp->chan_mutex);
904 		return (EINVAL);
905 	}
906 	cma_set_chan_state(chanp, SOL_CMA_CHAN_LISTEN);
907 
908 	if (chanp->chan_listenp) {
909 		SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str, "rdma_listen: "
910 		    "NON NULL listen_list");
911 		goto listen_from_list;
912 	}
913 
914 	chanp->chan_listenp = kmem_zalloc(sizeof (sol_cma_listen_info_t),
915 	    KM_SLEEP);
916 	init_genlist(&(CHAN_LISTEN_LIST(chanp)));
917 	(chanp->chan_listenp)->listen_is_root = 1;
918 	ret = cma_init_listen_root(chanp);
919 	if (ret) {
920 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
921 		    "cma_init_listen_root: failed");
922 		kmem_free(chanp->chan_listenp,
923 		    sizeof (sol_cma_listen_info_t));
924 		chanp->chan_listenp = NULL;
925 		mutex_exit(&chanp->chan_mutex);
926 		return (EINVAL);
927 	}
928 
929 	if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
930 		ibcma_append_listen_list(idp);
931 #ifdef IWARP_SUPPORT
932 		iwcma_append_listen_list(idp);
933 #endif
934 	} else if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
935 		ibcma_append_listen_list(idp);
936 #ifdef	IWARP_SUPPORT
937 	} else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
938 		iwcma_append_listen_list(idp);
939 #endif	/* IWARP_SUPPORT */
940 	}
941 
942 	if (genlist_empty(&(CHAN_LISTEN_LIST(chanp)))) {
943 		cma_fini_listen_root(chanp);
944 		kmem_free((void *)chanp->chan_listenp,
945 		    sizeof (sol_cma_listen_info_t));
946 		chanp->chan_listenp = NULL;
947 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
948 		    "No listeners");
949 		mutex_exit(&chanp->chan_mutex);
950 		return (0);
951 	}
952 listen_from_list:
953 	genlist_for_each(entry, &(CHAN_LISTEN_LIST(chanp))) {
954 		struct rdma_cm_id	*ep_idp;
955 		sol_cma_chan_t		*ep_chanp;
956 
957 		ep_idp = (struct rdma_cm_id *)entry->data;
958 		ep_chanp = (sol_cma_chan_t *)ep_idp;
959 		if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IB)
960 			ret = rdma_ib_listen(ep_idp, bklog);
961 #ifdef IWARP_SUPPORT
962 		if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
963 			ret = rdma_iw_listen(ep_idp, bklog);
964 #endif
965 		if (ret)
966 			break;
967 	}
968 	mutex_exit(&chanp->chan_mutex);
969 
970 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen: ret %x", ret);
971 	return (ret);
972 }
973 
974 int
975 rdma_accept(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
976 {
977 	struct rdma_cm_id	*root_idp;
978 	sol_cma_chan_t		*root_chanp, *chanp;
979 	int			ret = EINVAL;
980 
981 	ASSERT(idp);
982 	chanp = (sol_cma_chan_t *)idp;
983 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept(%p, %p)",
984 	    idp, conn_param);
985 
986 	mutex_enter(&chanp->chan_mutex);
987 	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
988 	    SOL_CMA_CHAN_ACCEPT) && cma_cas_chan_state(chanp,
989 	    SOL_CMA_CHAN_CONNECT, SOL_CMA_CHAN_ACCEPT)) {
990 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
991 		    "rdma_accept, Invalid state");
992 		mutex_exit(&chanp->chan_mutex);
993 		return (EINVAL);
994 	}
995 	mutex_exit(&chanp->chan_mutex);
996 
997 	root_idp = CHAN_LISTEN_ROOT(chanp);
998 	root_chanp = (sol_cma_chan_t *)root_idp;
999 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p",
1000 	    root_idp);
1001 
1002 	/* Delete from REQ_AVL_TREE on passive side */
1003 	if (root_idp) {
1004 		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p"
1005 		    "REQ AVL remove %p", root_chanp, idp);
1006 		mutex_enter(&root_chanp->chan_mutex);
1007 		avl_remove(&root_chanp->chan_req_avl_tree, idp);
1008 		mutex_exit(&root_chanp->chan_mutex);
1009 
1010 		mutex_enter(&chanp->chan_mutex);
1011 		/* Update chan_req_state to ACCEPTED */
1012 		chanp->chan_req_state = REQ_CMID_ACCEPTED;
1013 		mutex_exit(&chanp->chan_mutex);
1014 	}
1015 
1016 	/* For TCP, insert into ACPT_AVL_TREE */
1017 	if (root_idp && idp->ps == RDMA_PS_TCP) {
1018 		void		*find_ret;
1019 		avl_index_t	where;
1020 
1021 		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1022 		    "Add to ACPT AVL of %p IDP, idp %p, qp_hdl %p",
1023 		    root_idp, idp, chanp->chan_qp_hdl);
1024 		mutex_enter(&root_chanp->chan_mutex);
1025 		find_ret = avl_find(&root_chanp->chan_acpt_avl_tree,
1026 		    (void *)chanp->chan_qp_hdl, &where);
1027 		if (find_ret)
1028 			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1029 			    "DUPLICATE ENTRY in ACPT AVL : root %p, "
1030 			    "idp %p, qp_hdl %p",
1031 			    root_idp, idp, chanp->chan_qp_hdl);
1032 		avl_insert(&root_chanp->chan_acpt_avl_tree,
1033 		    (void *)idp, where);
1034 		mutex_exit(&root_chanp->chan_mutex);
1035 	} else if (root_idp && IS_UDP_CMID(root_idp)) {
1036 		cma_chan_state_t	chan_state;
1037 
1038 		/*
1039 		 * Accepting the connect request, no more events for this
1040 		 * connection.
1041 		 */
1042 		mutex_enter(&chanp->chan_mutex);
1043 		cma_handle_nomore_events(chanp);
1044 		chan_state = cma_get_chan_state(chanp);
1045 		mutex_exit(&chanp->chan_mutex);
1046 		/* If rdma_destroy_id() was called, destroy CMID */
1047 		if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING)
1048 			cma_destroy_id((struct rdma_cm_id *)chanp);
1049 	}
1050 
1051 	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1052 		ret = rdma_ib_accept(idp, conn_param);
1053 #ifdef	IWARP_SUPPORT
1054 	if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1055 		ret = rdma_iw_accept(idp, conn_param);
1056 #endif	/* IWARP_SUPPORT */
1057 
1058 	if (ret && root_idp && idp->ps == RDMA_PS_TCP) {
1059 		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1060 		    "Delete from REQ AVL of %p IDP, idp %p",
1061 		    root_idp, idp);
1062 		mutex_enter(&root_chanp->chan_mutex);
1063 		avl_remove(&root_chanp->chan_acpt_avl_tree, idp);
1064 		mutex_exit(&root_chanp->chan_mutex);
1065 	}
1066 
1067 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept: ret %x", ret);
1068 	return (ret);
1069 }
1070 
1071 int
1072 rdma_notify(struct rdma_cm_id *idp, enum ib_event_type evt)
1073 {
1074 	sol_cma_chan_t		*chanp;
1075 
1076 	ASSERT(idp);
1077 	chanp = (sol_cma_chan_t *)idp;
1078 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify(%p, %x)", idp, evt);
1079 
1080 	mutex_enter(&chanp->chan_mutex);
1081 	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
1082 	    SOL_CMA_CHAN_EVENT_NOTIFIED)) {
1083 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1084 		    "rdma_notify, Invalid state");
1085 		mutex_exit(&chanp->chan_mutex);
1086 		return (EINVAL);
1087 	}
1088 	mutex_exit(&chanp->chan_mutex);
1089 
1090 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify: ret 0");
1091 	return (0);
1092 }
1093 
1094 int
1095 rdma_reject(struct rdma_cm_id *idp, const void *priv_data,
1096     uint8_t priv_data_len)
1097 {
1098 	struct rdma_cm_id	*root_idp;
1099 	sol_cma_chan_t		*root_chanp, *chanp;
1100 	int			ret = EINVAL;
1101 
1102 	ASSERT(idp);
1103 	chanp = (sol_cma_chan_t *)idp;
1104 	root_idp = CHAN_LISTEN_ROOT(chanp);
1105 	root_chanp = (sol_cma_chan_t *)root_idp;
1106 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject(%p, %p)", idp,
1107 	    priv_data, priv_data_len);
1108 
1109 	mutex_enter(&chanp->chan_mutex);
1110 	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
1111 	    SOL_CMA_CHAN_REJECT)) {
1112 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1113 		    "rdma_accept, Invalid state");
1114 		mutex_exit(&chanp->chan_mutex);
1115 		return (EINVAL);
1116 	}
1117 	mutex_exit(&chanp->chan_mutex);
1118 
1119 	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1120 		ret = rdma_ib_reject(idp, priv_data, priv_data_len);
1121 #ifdef	IWARP_SUPPORT
1122 	if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1123 		ret = rdma_iw_reject(idp, priv_data, priv_data_len);
1124 #endif	/* IWARP_SUPPORT */
1125 
1126 	mutex_enter(&chanp->chan_mutex);
1127 	if (!ret)
1128 		chanp->chan_connect_flag = SOL_CMA_CONNECT_NONE;
1129 	mutex_exit(&chanp->chan_mutex);
1130 
1131 	if (!ret && root_idp) {
1132 		cma_chan_state_t	chan_state;
1133 
1134 		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "reject: root_idp %p"
1135 		    "REQ AVL remove %p", root_chanp, idp);
1136 		/* Remove from Req AVL tree */
1137 		mutex_enter(&root_chanp->chan_mutex);
1138 		avl_remove(&root_chanp->chan_req_avl_tree, idp);
1139 		mutex_exit(&root_chanp->chan_mutex);
1140 
1141 		/* Update chan_req_state to REJECTED */
1142 		mutex_enter(&chanp->chan_mutex);
1143 		chanp->chan_req_state = REQ_CMID_REJECTED;
1144 
1145 		/*
1146 		 * Rejecting connect request, no more events for this
1147 		 * connection.
1148 		 */
1149 		cma_handle_nomore_events(chanp);
1150 		chan_state = cma_get_chan_state(chanp);
1151 		mutex_exit(&chanp->chan_mutex);
1152 		/* If rdma_destroy_id() was called, destroy CMID */
1153 		if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING)
1154 			cma_destroy_id((struct rdma_cm_id *)chanp);
1155 	}
1156 
1157 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject: ret %x", ret);
1158 	return (ret);
1159 }
1160 
1161 int
1162 rdma_disconnect(struct rdma_cm_id *idp)
1163 {
1164 	sol_cma_chan_t		*chanp;
1165 	int			ret = EINVAL;
1166 	cma_chan_state_t	state;
1167 
1168 	chanp = (sol_cma_chan_t *)idp;
1169 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect(%p)", idp);
1170 
1171 	if (!idp)
1172 		return (0);
1173 
1174 	mutex_enter(&chanp->chan_mutex);
1175 	if (!(SOL_CMA_DISCONNECT_OK(chanp))) {
1176 		SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1177 		    "rdma_disconnect(%p) - Not connected!!", idp);
1178 		mutex_exit(&chanp->chan_mutex);
1179 		return (EINVAL);
1180 	}
1181 	state = cma_get_chan_state(chanp);
1182 	cma_set_chan_state(chanp, SOL_CMA_CHAN_DISCONNECT);
1183 	mutex_exit(&chanp->chan_mutex);
1184 
1185 	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
1186 		ret = rdma_ib_disconnect(idp);
1187 #ifdef	IWARP_SUPPORT
1188 	} else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
1189 		ret = rdma_iw_disconnect(idp);
1190 #endif	/* IWARP_SUPPORT */
1191 	}
1192 
1193 	if (ret) {
1194 		mutex_enter(&chanp->chan_mutex);
1195 		cma_set_chan_state(chanp, state);
1196 		mutex_exit(&chanp->chan_mutex);
1197 		return (ret);
1198 	}
1199 
1200 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect: ret %x", ret);
1201 	return (ret);
1202 }
1203 
1204 int
1205 rdma_init_qp_attr(struct rdma_cm_id *idp, struct ib_qp_attr *qpattr,
1206     int *qp_attr_mask)
1207 {
1208 	sol_cma_chan_t		*chanp;
1209 	int			ret = EINVAL;
1210 
1211 	ASSERT(idp);
1212 	chanp = (sol_cma_chan_t *)idp;
1213 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_init_qp_attr(%p, %p, %p)",
1214 	    idp, qpattr, qp_attr_mask);
1215 
1216 	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
1217 		ret = rdma_ib_init_qp_attr(idp, qpattr, qp_attr_mask);
1218 #ifdef	IWARP_SUPPORT
1219 	} else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1220 		ret = rdma_iw_init_qp_attr(idp, qpattr, qp_attr_mask);
1221 #endif	/* IWARP_SUPPORT */
1222 	} else {
1223 		ret = EINVAL;
1224 	}
1225 
1226 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1227 	    "rdma_init_qp_attr: ret %x", ret);
1228 
1229 	return (ret);
1230 }
1231 
/*
 * rdma_join_multicast() - Join the multicast group identified by
 * "addr" on the channel "idp"; "context" is opaque consumer data.
 * Valid only after the CMID has been bound or has its address/route
 * resolved. Multicast is handled only for the IB transport; for
 * iWARP it fails with ENOTSUP. Returns 0 on success, or an errno.
 */
int
rdma_join_multicast(struct rdma_cm_id *idp, struct sockaddr *addr,
    void *context)
{
	sol_cma_chan_t		*chanp;
	int			ret = ENODEV;
	cma_chan_state_t	state;

	ASSERT(idp);
	chanp = (sol_cma_chan_t *)idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
	    "rdma_join_multicast(%p, %p, %p)",
	    idp, addr, context);

	mutex_enter(&chanp->chan_mutex);
	state = cma_get_chan_state(chanp);
	/* Join is valid only from BOUND / ROUTE_RESLVD / ADDR_RESLVD */
	if (state != SOL_CMA_CHAN_BOUND &&
	    state != SOL_CMA_CHAN_ROUTE_RESLVD &&
	    state != SOL_CMA_CHAN_ADDR_RESLVD) {
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_join_multicast, Invalid state");
		mutex_exit(&chanp->chan_mutex);
		return (EINVAL);
	}

	/*
	 * NOTE(review): chan_mutex is held across the transport join
	 * call below - confirm rdma_ib_join_multicast() neither blocks
	 * for long nor re-acquires this mutex.
	 */
	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
		ret = rdma_ib_join_multicast(idp, addr, context);
#ifdef	IWARP_SUPPORT
	/* No support for Multicast on iWARP */
	else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
		ret = ENOTSUP;
#endif	/* IWARP_SUPPORT */
	mutex_exit(&chanp->chan_mutex);

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
	    "rdma_join_multicast: ret %x", ret);
	return (ret);
}
1270 
1271 void
1272 rdma_leave_multicast(struct rdma_cm_id *idp, struct sockaddr *addr)
1273 {
1274 	sol_cma_chan_t		*chanp;
1275 	cma_chan_state_t	state;
1276 
1277 	ASSERT(idp);
1278 	chanp = (sol_cma_chan_t *)idp;
1279 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_leave_multicast(%p, %p)",
1280 	    idp, addr);
1281 
1282 	mutex_enter(&chanp->chan_mutex);
1283 	state = cma_get_chan_state(chanp);
1284 	if (state != SOL_CMA_CHAN_BOUND &&
1285 	    state != SOL_CMA_CHAN_ROUTE_RESLVD &&
1286 	    state != SOL_CMA_CHAN_ADDR_RESLVD) {
1287 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1288 		    "rdma_leave_multicast, Invalid state");
1289 		mutex_exit(&chanp->chan_mutex);
1290 		return;
1291 	}
1292 
1293 	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1294 		rdma_ib_leave_multicast(idp, addr);
1295 #ifdef	IWARP_SUPPORT
1296 	/* No support for Multicast on iWARP */
1297 	else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1298 		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1299 		    "rdma_leave_multicast, iWARP");
1300 #endif	/* IWARP_SUPPORT */
1301 	mutex_exit(&chanp->chan_mutex);
1302 
1303 	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_join_multicast: ret");
1304 }
1305 
/*ARGSUSED*/
/*
 * rdma_create_qp() - QP creation through the rdmacm interface is not
 * implemented in sol_cma; this stub always fails with -EINVAL.
 */
int
rdma_create_qp(struct rdma_cm_id *idp, struct ib_pd *pd,
    struct ib_qp_init_attr *qp_init_attr)
{
	return (-EINVAL);
}
1313 
/*ARGSUSED*/
/*
 * rdma_destroy_qp() - QP destruction through the rdmacm interface is
 * not implemented in sol_cma; this stub is a no-op.
 */
void
rdma_destroy_qp(struct rdma_cm_id *idp)
{
}
1319 
1320 void
1321 ibcma_init_rdma_devs()
1322 {
1323 	uint_t			i, nhcas;
1324 	ib_guid_t		*guidp;
1325 	struct ib_device	*device;
1326 
1327 	if ((nhcas = ibt_get_hca_list(&guidp)) == 0) {
1328 		SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1329 		    "ibcma_init_rdma_devs() - NO HCAs");
1330 		return;
1331 	}
1332 
1333 	for (i = 0; i < nhcas; i++) {
1334 		device = kmem_zalloc(sizeof (struct ib_device), KM_SLEEP);
1335 		device->node_guid = htonll(guidp[i]);
1336 		sol_cma_add_dev(device);
1337 	}
1338 	ibt_free_hca_list(guidp, nhcas);
1339 }
1340 
1341 /*
1342  * Functions to compare to rdma_cm_id *, used by AVL tree
1343  * routines.
1344  */
1345 int
1346 sol_cma_req_cmid_cmp(const void *p1, const void *p2)
1347 {
1348 	sol_cma_chan_t		*chanp;
1349 
1350 	chanp = (sol_cma_chan_t *)p2;
1351 	if (chanp->chan_session_id > p1)
1352 		return (+1);
1353 	else if (chanp->chan_session_id < p1)
1354 		return (-1);
1355 	else
1356 		return (0);
1357 }
1358 
1359 int
1360 sol_cma_cmid_cmp(const void *p1, const void *p2)
1361 {
1362 	sol_cma_chan_t		*chanp;
1363 
1364 	chanp = (sol_cma_chan_t *)p2;
1365 	if (chanp->chan_qp_hdl > p1)
1366 		return (+1);
1367 	else if (chanp->chan_qp_hdl < p1)
1368 		return (-1);
1369 	else
1370 		return (0);
1371 }
1372 
1373 /*
1374  * Function to compare two sol_cma_glbl_listen_t *, used by
1375  * AVL tree routines.
1376  */
1377 int
1378 sol_cma_svc_cmp(const void *p1, const void *p2)
1379 {
1380 	sol_cma_glbl_listen_t	*listenp;
1381 	uint64_t		sid;
1382 
1383 	sid = *(uint64_t *)p1;
1384 	listenp = (sol_cma_glbl_listen_t *)p2;
1385 	if (listenp->cma_listen_chan_sid > sid)
1386 		return (+1);
1387 	else if (listenp->cma_listen_chan_sid < sid)
1388 		return (-1);
1389 	else
1390 		return (0);
1391 }
1392 
/*
 * cma_init_listen_root() - Set up global listen state for a root
 * listening CMID.
 *
 * Looks up sol_cma_glbl_listen_tree for an existing entry with the
 * same service ID (SID) :
 *	- found, same IB client handle : add this channel to the
 *	  entry's channel list and reuse its service handle;
 *	- found, different client handle : fail with EINVAL;
 *	- not found : allocate a new global entry, register the
 *	  service via ibcma_init_root_chan() and insert the entry
 *	  into the tree.
 *
 * Returns 0 on success, EINVAL or the ibcma_init_root_chan() error.
 */
static int
cma_init_listen_root(sol_cma_chan_t *chanp)
{
	sol_cma_glbl_listen_t	*cma_listenp;
	sol_cma_listen_info_t	*chan_listenp;
	int			rc = 0;
	avl_index_t		where = 0;
	uint64_t		listen_sid;

	ASSERT(chanp);
	ASSERT(chanp->chan_listenp);
	chan_listenp = chanp->chan_listenp;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
	    "cma_init_listen_root(%p)", chanp);

	/*
	 * First search for matching global listen_info for this SID.
	 * If found with the same client handle, reuse the service
	 * handle, if matching SID is found with different client
	 * handle, return EINVAL.
	 */
	listen_sid = ibcma_init_root_sid(chanp);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
	    "cma_init_listen_root: search SID 0x%llx",
	    listen_sid);

	mutex_enter(&sol_cma_glob_mutex);
	cma_listenp = avl_find(&sol_cma_glbl_listen_tree,
	    (void *) &listen_sid, &where);
	if (cma_listenp && cma_listenp->cma_listen_clnt_hdl ==
	    chanp->chan_ib_client_hdl) {
		/* Same SID, same client : share the existing service */
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "cma_init_listen_root: matching listenp %p SID 0x%llx",
		    cma_listenp, listen_sid);
		chan_listenp->listen_entry = add_genlist(
		    &cma_listenp->cma_listen_chan_list,
		    (uintptr_t)chanp, NULL);
		chan_listenp->chan_glbl_listen_info = cma_listenp;
		ibcma_copy_srv_hdl(chanp, cma_listenp);
		mutex_exit(&sol_cma_glob_mutex);
		return (0);
	} else if (cma_listenp) {
		/* Same SID but owned by a different client : reject */
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "cma_init_listen_root: listenp %p, SID 0x%llx match, "
		    "client hdl prev %p, new %p mismatch",
		    cma_listenp, listen_sid,
		    cma_listenp->cma_listen_clnt_hdl,
		    chanp->chan_ib_client_hdl);
		mutex_exit(&sol_cma_glob_mutex);
		return (EINVAL);
	}

	/* No existing entry for this SID : create and register one */
	cma_listenp = kmem_zalloc(sizeof (sol_cma_glbl_listen_t), KM_SLEEP);
	init_genlist(&cma_listenp->cma_listen_chan_list);
	chan_listenp->listen_entry = add_genlist(
	    &cma_listenp->cma_listen_chan_list, (uintptr_t)chanp, NULL);
	chan_listenp->chan_glbl_listen_info = cma_listenp;
	cma_listenp->cma_listen_clnt_hdl = chanp->chan_ib_client_hdl;
	cma_listenp->cma_listen_chan_sid = listen_sid;

	rc = ibcma_init_root_chan(chanp, cma_listenp);
	if (rc) {
		mutex_exit(&sol_cma_glob_mutex);
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "cma_init_listen_root: ibcma_init_root_chan failed!!");
		/* Entry was never inserted into the tree; just free it */
		delete_genlist(&cma_listenp->cma_listen_chan_list,
		    chan_listenp->listen_entry);
		kmem_free(cma_listenp, sizeof (sol_cma_glbl_listen_t));
		return (rc);
	}
	avl_insert(&sol_cma_glbl_listen_tree, cma_listenp, where);
	mutex_exit(&sol_cma_glob_mutex);
	return (0);
}
1468 
/*
 * cma_fini_listen_root() - Undo cma_init_listen_root() for a root
 * listening CMID. Removes this channel from the global listen
 * entry's channel list; when the list becomes empty the service is
 * torn down via ibcma_fini_root_chan() and the global entry is
 * removed from sol_cma_glbl_listen_tree and freed.
 */
static void
cma_fini_listen_root(sol_cma_chan_t *chanp)
{
	sol_cma_glbl_listen_t	*cma_listenp;
	sol_cma_listen_info_t	*chan_listenp;

	ASSERT(chanp);
	ASSERT(chanp->chan_listenp);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_fini_listen_root(%p)",
	    chanp);
	chan_listenp = chanp->chan_listenp;
	cma_listenp = chan_listenp->chan_glbl_listen_info;
	ASSERT(cma_listenp);
	mutex_enter(&sol_cma_glob_mutex);
	delete_genlist(&cma_listenp->cma_listen_chan_list,
	    chan_listenp->listen_entry);
	/* Last channel sharing this service : deregister and free it */
	if (genlist_empty(&cma_listenp->cma_listen_chan_list)) {
		if (ibcma_fini_root_chan(chanp) == 0) {
			avl_remove(&sol_cma_glbl_listen_tree,
			    cma_listenp);
			kmem_free(cma_listenp,
			    sizeof (sol_cma_glbl_listen_t));
		} else
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "cma_fini_listen_root: "
			    "ibcma_fini_root_chan failed");
	}

	mutex_exit(&sol_cma_glob_mutex);
}
1499 
/*
 * Argument block for dispatching a CM event to a taskq thread.
 * Carries a deep copy of the event parameters so the caller's storage
 * can go away before cma_generate_event_thr() runs.
 */
typedef struct cma_event_async_arg {
	struct rdma_cm_id	*idp;		/* CMID the event is for */
	enum rdma_cm_event_type	event;		/* event being reported */
	int			status;		/* event status code */
	union {
		struct rdma_conn_param	conn;
		struct rdma_ud_param	param;
	} un;					/* copied event parameters */
	struct rdma_conn_param	*conn_param;	/* &un.conn or NULL */
	struct rdma_ud_param	*ud_paramp;	/* &un.param or NULL */
} cma_event_async_arg_t;
1511 
1512 static void cma_generate_event_sync(struct rdma_cm_id *,
1513     enum rdma_cm_event_type, int, struct rdma_conn_param *,
1514     struct rdma_ud_param *);
1515 
1516 void
1517 cma_generate_event_thr(void *arg)
1518 {
1519 	cma_event_async_arg_t	*event_arg = (cma_event_async_arg_t *)arg;
1520 
1521 	cma_generate_event_sync(event_arg->idp, event_arg->event,
1522 	    event_arg->status, event_arg->conn_param,
1523 	    event_arg->ud_paramp);
1524 
1525 	if (event_arg->conn_param && event_arg->conn_param->private_data_len)
1526 		kmem_free((void *)event_arg->conn_param->private_data,
1527 		    event_arg->conn_param->private_data_len);
1528 	if (event_arg->ud_paramp && event_arg->ud_paramp->private_data_len)
1529 		kmem_free((void *)event_arg->ud_paramp->private_data,
1530 		    event_arg->ud_paramp->private_data_len);
1531 	kmem_free(arg, sizeof (cma_event_async_arg_t));
1532 }
1533 
/*
 * cma_generate_event() - Deliver a CM event for "idp" to the consumer
 * asynchronously via the system taskq.
 *
 * At most one of conn_param / ud_paramp is expected to be non-NULL;
 * its contents (including any private data) are deep-copied into a
 * cma_event_async_arg_t, which cma_generate_event_thr() frees after
 * delivery. If the CMID has already been marked destroyed the event
 * is handled synchronously (and not delivered to the consumer).
 */
void
cma_generate_event(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
    int status, struct rdma_conn_param *conn_param,
    struct rdma_ud_param *ud_paramp)
{
	cma_event_async_arg_t	*event_arg;
	sol_cma_chan_t		*chanp = (sol_cma_chan_t *)idp;

	/*
	 * Set SOL_CMA_CALLER_EVENT_PROGRESS to indicate event
	 * notification is in progress, so that races between
	 * rdma_destroy_id() and event notification is taken care.
	 *
	 * If rdma_destroy_id() has been called for this CMID, call
	 * cma_generate_event_sync() which skips notification to the
	 * consumer and handles the event.
	 */
	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_EVENT_PROGRESS;
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
		mutex_exit(&chanp->chan_mutex);
		cma_generate_event_sync(idp, event, status, conn_param,
		    ud_paramp);
		return;
	}
	mutex_exit(&chanp->chan_mutex);

	/* Deep-copy the event arguments for the taskq thread */
	event_arg = kmem_zalloc(sizeof (cma_event_async_arg_t), KM_SLEEP);
	event_arg->idp = idp;
	event_arg->event = event;
	event_arg->status = status;
	event_arg->conn_param = NULL;
	event_arg->ud_paramp = NULL;
	if (conn_param && conn_param->private_data_len) {
		/* Connection params with private data : copy both */
		bcopy(conn_param, &(event_arg->un.conn),
		    sizeof (struct rdma_conn_param));
		event_arg->conn_param = &(event_arg->un.conn);
		event_arg->conn_param->private_data = kmem_zalloc(
		    conn_param->private_data_len, KM_SLEEP);
		bcopy(conn_param->private_data,
		    (void *)event_arg->conn_param->private_data,
		    conn_param->private_data_len);
	} else if (conn_param && conn_param->private_data_len == 0) {
		/*
		 * NOTE(review): in this branch only the union is
		 * filled in; event_arg->conn_param stays NULL, so the
		 * taskq thread passes a NULL conn_param to
		 * cma_generate_event_sync() - confirm intentional.
		 */
		bcopy(conn_param, &(event_arg->un.conn),
		    sizeof (struct rdma_conn_param));
	} else if (ud_paramp) {
		bcopy(ud_paramp, &(event_arg->un.param),
		    sizeof (struct rdma_ud_param));
		event_arg->ud_paramp = &(event_arg->un.param);
		if (ud_paramp->private_data_len) {
			event_arg->ud_paramp->private_data = kmem_zalloc(
			    ud_paramp->private_data_len, KM_SLEEP);
			bcopy(ud_paramp->private_data,
			    (void *)event_arg->ud_paramp->private_data,
			    ud_paramp->private_data_len);
		} else if (ud_paramp->private_data) {
			event_arg->ud_paramp->private_data =
			    ud_paramp->private_data;
		}
	}

	/*
	 * On dispatch failure the event is dropped : clear the
	 * in-progress flag and wake any rdma_destroy_id() waiter.
	 */
	if (taskq_dispatch(system_taskq, cma_generate_event_thr,
	    (void *)event_arg, TQ_SLEEP) == 0) {
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "generate_event_async: taskq_dispatch() failed!!");
		mutex_enter(&chanp->chan_mutex);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED)
			cv_broadcast(&chanp->chan_destroy_cv);
		mutex_exit(&chanp->chan_mutex);
	}
}
1608 
/*
 * cma_generate_event_sync() - Synchronously deliver a CM event to the
 * consumer's event handler and perform the sol_cma book-keeping for
 * that event.
 *
 * Called from the taskq thread (cma_generate_event_thr()) or directly
 * by cma_generate_event() when the CMID is already marked destroyed
 * (in which case the consumer handler is skipped).
 *
 * If the consumer handler returns non-zero the connection is torn
 * down and the CMID destroyed (unless a concurrent rdma_destroy_id()
 * is waiting, which is then woken instead). Otherwise, per-event
 * accounting is done : REQ CMID notification counts, and destruction
 * of CMIDs whose destroy was pending on DISCONNECTED / UNREACHABLE.
 */
static void
cma_generate_event_sync(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
    int status, struct rdma_conn_param *conn_param,
    struct rdma_ud_param *ud_paramp)
{
	struct rdma_cm_event	cm_event;
	sol_cma_chan_t		*chanp = (sol_cma_chan_t *)idp;
	struct rdma_cm_id	*root_idp = NULL;
	sol_cma_chan_t		*root_chanp;
	int			ret;
	cma_chan_state_t	chan_state;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "generate_event_sync(%p, %x, "
	    "%x, %p, %p", idp, event, status, conn_param, ud_paramp);

	/* Build the event structure passed to the consumer */
	bzero(&cm_event, sizeof (cm_event));
	cm_event.event = event;
	cm_event.status = status;
	if (conn_param)
		bcopy((void *)conn_param, (void *)(&(cm_event.param.conn)),
		    sizeof (struct rdma_conn_param));
	else if (ud_paramp)
		bcopy((void *)ud_paramp, (void *)(&(cm_event.param.ud)),
		    sizeof (struct rdma_ud_param));

	/*
	 * If the consumer has destroyed the context for this CMID -
	 * do not notify, skip to handling the sol_ofs specific
	 * handling of the event.
	 */
	mutex_enter(&chanp->chan_mutex);
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
		mutex_exit(&chanp->chan_mutex);
		goto ofs_consume_event;
	}
	mutex_exit(&chanp->chan_mutex);

	/* Pass the event to the client */
	ret = (idp->event_handler) (idp, &cm_event);

	if (ret) {
		/* Consumer rejected the event : tear the connection down */
		if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
			    "cma_generate_event_async: consumer failed %d "
			    "event", event);
			/*
			 * Disconnect if the consumer returned non zero.
			 * rdma_disconnect will send a REJ to the active
			 * side / client.
			 */
			if (rdma_disconnect(idp))
				SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
				    "generate_event_async: rdma_disconnect "
				    "failed");
		} else
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "generate_event_async: consumer failed %d event",
			    event);

		/*
		 * Wake a waiting rdma_destroy_id(), or destroy the
		 * CMID here when no destroy is in flight.
		 */
		mutex_enter(&chanp->chan_mutex);
		chanp->chan_connect_flag = SOL_CMA_CONNECT_NONE;
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED) {
			cv_broadcast(&chanp->chan_destroy_cv);
			mutex_exit(&chanp->chan_mutex);
		} else {
			mutex_exit(&chanp->chan_mutex);
			rdma_destroy_id(idp);
		}
		return;
	}
ofs_consume_event:
	root_idp = CHAN_LISTEN_ROOT(chanp);
	root_chanp = (sol_cma_chan_t *)root_idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "gen_event: root_idp %p",
	    root_idp);
	if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		/*
		 * Update chan_req_state for the REQ CMID. Decrement
		 * count of REQ CMIDs not notified to consumer.
		 */
		if (!root_idp) {
			mutex_enter(&chanp->chan_mutex);
			chanp->chan_cmid_destroy_state &=
			    ~SOL_CMA_CALLER_EVENT_PROGRESS;
			if (chanp->chan_cmid_destroy_state &
			    SOL_CMA_CALLER_CMID_DESTROYED)
				cv_broadcast(&chanp->chan_destroy_cv);
			mutex_exit(&chanp->chan_mutex);
			return;
		}

		mutex_enter(&chanp->chan_mutex);
		chanp->chan_req_state = REQ_CMID_NOTIFIED;
		mutex_exit(&chanp->chan_mutex);
		mutex_enter(&root_chanp->chan_mutex);
		root_chanp->chan_req_cnt--;
#ifdef	DEBUG
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "Dec req_cnt of %p IDP, idp %p, req_cnt %x",
		    root_idp, idp, root_chanp->chan_req_cnt);
#endif
		mutex_exit(&root_chanp->chan_mutex);
	} else if (event == RDMA_CM_EVENT_DISCONNECTED && root_idp) {
		/*
		 * Passive side disconnect : no more events for this
		 * connection.
		 * NOTE(review): this declaration shadows the outer
		 * chan_state local; harmless but confusing.
		 */
		cma_chan_state_t	chan_state;

		mutex_enter(&chanp->chan_mutex);
		chanp->chan_qp_hdl = NULL;
		cma_handle_nomore_events(chanp);
		chan_state = cma_get_chan_state(chanp);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED) {
			cv_broadcast(&chanp->chan_destroy_cv);
			mutex_exit(&chanp->chan_mutex);
		} else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
			/* If rdma_destroy_id() was called, destroy CMID */
			mutex_exit(&chanp->chan_mutex);
			cma_destroy_id((struct rdma_cm_id *)chanp);
		} else
			mutex_exit(&chanp->chan_mutex);
		return;
	} else if (event == RDMA_CM_EVENT_DISCONNECTED && !root_idp) {
		/*
		 * Client side TCP CMID :
		 *	If rdma_destroy_id() was called, destroy CMID.
		 *
		 *	If not chan_connect_flag is set to CONNECT_NONE
		 *	so it can be deleted when rdma_destroy_id is
		 *	called.
		 */
		mutex_enter(&chanp->chan_mutex);
		chan_state = cma_get_chan_state(chanp);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED) {
			cv_broadcast(&chanp->chan_destroy_cv);
			mutex_exit(&chanp->chan_mutex);
		} else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
			mutex_exit(&chanp->chan_mutex);
			cma_destroy_id(idp);
		} else
			mutex_exit(&chanp->chan_mutex);
		return;
	} else if (IS_UDP_CMID(idp) && event == RDMA_CM_EVENT_UNREACHABLE) {
		/*
		 * If rdma_destroy_id() was called, destroy CMID
		 * If not chan_connect_flag is set to CONNECT_NONE
		 * so it can be deleted when rdma_destroy_id is
		 * called.
		 */
		mutex_enter(&chanp->chan_mutex);
		chan_state = cma_get_chan_state(chanp);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED) {
			cv_broadcast(&chanp->chan_destroy_cv);
			mutex_exit(&chanp->chan_mutex);
		} else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
			mutex_exit(&chanp->chan_mutex);
			cma_destroy_id(idp);
		} else
			mutex_exit(&chanp->chan_mutex);
		return;
	}

	/* Event fully handled : clear the in-progress flag */
	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_EVENT_PROGRESS;
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
		cv_broadcast(&chanp->chan_destroy_cv);
	mutex_exit(&chanp->chan_mutex);
}
1786 
1787 /* Local Static functions */
1788 static struct rdma_cm_id *
1789 cma_alloc_chan(rdma_cm_event_handler evt_hdlr, void *context,
1790     enum rdma_port_space ps)
1791 {
1792 	struct rdma_cm_id	*rdma_idp;
1793 	sol_cma_chan_t		*chanp;
1794 
1795 	chanp = kmem_zalloc(sizeof (sol_cma_chan_t), KM_SLEEP);
1796 	mutex_init(&chanp->chan_mutex, NULL, MUTEX_DRIVER, NULL);
1797 	cv_init(&chanp->chan_destroy_cv, NULL, CV_DRIVER, NULL);
1798 	rdma_idp = &(chanp->chan_rdma_cm);
1799 	rdma_idp->context = context;
1800 	rdma_idp->ps = ps;
1801 	rdma_idp->event_handler = evt_hdlr;
1802 	mutex_enter(&chanp->chan_mutex);
1803 	cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
1804 	avl_create(&chanp->chan_req_avl_tree, sol_cma_req_cmid_cmp,
1805 	    sizeof (sol_cma_chan_t),
1806 	    offsetof(sol_cma_chan_t, chan_req_avl_node));
1807 	avl_create(&chanp->chan_acpt_avl_tree, sol_cma_cmid_cmp,
1808 	    sizeof (sol_cma_chan_t),
1809 	    offsetof(sol_cma_chan_t, chan_acpt_avl_node));
1810 	mutex_exit(&chanp->chan_mutex);
1811 
1812 	return (rdma_idp);
1813 }
1814 
/*
 * Change the state of sol_cma_chan_t.
 * Caller must hold chanp->chan_mutex.
 */
static void
cma_set_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t newstate)
{
	ASSERT(MUTEX_HELD(&chanp->chan_mutex));
	chanp->chan_state = newstate;
}
1822 
/*
 * Return the current state of sol_cma_chan_t.
 * Caller must hold chanp->chan_mutex.
 */
cma_chan_state_t
cma_get_chan_state(sol_cma_chan_t *chanp)
{
	ASSERT(MUTEX_HELD(&chanp->chan_mutex));
	return (chanp->chan_state);
}
1829 
1830 /* Check & Swap the state of sol_ucma_chan_t */
1831 static int
1832 cma_cas_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t prevstate,
1833     cma_chan_state_t newstate)
1834 {
1835 	int	ret = 0;
1836 
1837 	ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1838 	if (chanp->chan_state != prevstate)
1839 		ret = -1;
1840 	else
1841 		chanp->chan_state = newstate;
1842 
1843 	return (ret);
1844 }
1845 
/*
 * cma_free_listen_list() - Free the per-endpoint listen CMIDs hanging
 * off a root listening CMID.
 *
 * Pops each entry off the root's listen list and finalizes the
 * endpoint channel via ibcma_fini_ep_chan(). On success the
 * endpoint's device-list entry is unlinked, its device reference
 * released, and the channel and list entry are freed.
 *
 * NOTE(review): when ibcma_fini_ep_chan() fails, the popped entry and
 * its channel are not freed here - confirm cleanup happens elsewhere.
 */
static void
cma_free_listen_list(struct rdma_cm_id *idp)
{
	genlist_entry_t	*entry;
	sol_cma_chan_t	*chanp = (sol_cma_chan_t *)idp;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_free_listen_list(%p)", idp);
	mutex_enter(&chanp->chan_mutex);
	entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
	mutex_exit(&chanp->chan_mutex);
	while (entry) {
		sol_cma_chan_t	*ep_chanp;

		ep_chanp = (sol_cma_chan_t *)entry->data;
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "fini_ep_chan: %p",
		    ep_chanp);
		if (ibcma_fini_ep_chan(ep_chanp) == 0) {
			genlist_entry_t		*entry1;
			struct ib_device	*device;
			cma_device_t		*cma_device;

			/* Unlink the endpoint from its device's list */
			ASSERT(ep_chanp->chan_listenp);
			mutex_enter(&ep_chanp->chan_mutex);
			entry1 = ep_chanp->chan_listenp->listen_ep_dev_entry;
			device = ep_chanp->chan_listenp->listen_ep_device;
			ASSERT(device);
			cma_device = device->data;
			delete_genlist(&cma_device->cma_epchan_list,
			    entry1);
			sol_cma_release_device(
			    (struct rdma_cm_id *)ep_chanp);
			mutex_exit(&ep_chanp->chan_mutex);
			if (ep_chanp->chan_listenp)
				kmem_free(ep_chanp->chan_listenp,
				    sizeof (sol_cma_listen_info_t));

			/* Tear down and free the endpoint channel */
			mutex_destroy(&ep_chanp->chan_mutex);
			cv_destroy(&ep_chanp->chan_destroy_cv);
			kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
			kmem_free(entry, sizeof (genlist_entry_t));
		}

		mutex_enter(&chanp->chan_mutex);
		entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
		mutex_exit(&chanp->chan_mutex);
	}
}
1893 
1894 /*
1895  * Destroy a listening CMID when :
1896  *	a. All CONNECTION REQUEST recieved have been rejected
1897  *	   or closed.
1898  *	b. No CONNECTION REQUEST recieved.
1899  * Do not destroy a listening CMID when :
1900  *	a. CONNECTION REQUEST has been recieved and not been
1901  *	   accepted from the passive / server side.
1902  *	b. CONNECTION REQUEST has been recieved and has been
1903  *	   accepted from the passive server side.
1904  *	Mark the listening CMID as destroy pending.
1905  *
1906  * For CMIDs created for rdma_connect() or created for a
1907  * CONNECT request, destroy the CMID only when :
1908  *       CONNECTION has been closed or rejected.
1909  *
1910  *       Mark the CMID as destroy pending.
1911  *
1912  * When a connection is rejected or closed :
1913  *	Check if flag indicates - destroy pending,
1914  *	cma_destroy_id() is called, this also does
1915  *
1916  *	If there is a listening CMID assosiated with it,
1917  *	   call cma_destroy_if(listen_cmid);
1918  */
void
cma_destroy_id(struct rdma_cm_id *idp)
{
	sol_cma_chan_t		*chanp = (sol_cma_chan_t *)idp;
	cma_chan_state_t	state;
	ulong_t			acpt_nodes, req_nodes;

	mutex_enter(&chanp->chan_mutex);
	acpt_nodes = avl_numnodes(&chanp->chan_acpt_avl_tree);
	req_nodes = avl_numnodes(&chanp->chan_req_avl_tree);
	state = cma_get_chan_state(chanp);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)- "
	    "est CMIDs %ld, req CMID %ld, listen_root %p, state %x, %x",
	    idp, acpt_nodes, req_nodes, chanp->listen_root,
	    state, chanp->chan_req_state);

	/*
	 * If there are either REQ received or Established CMIDs just return
	 * after marking the channel destroy-pending; the actual destroy is
	 * deferred until those child CMIDs are gone.
	 * rdma_destroy() for these CMIDs can be called by client later.
	 */
	if (acpt_nodes || req_nodes) {
		cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
		mutex_exit(&chanp->chan_mutex);
		return;
	}
	cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROYING);
	avl_destroy(&chanp->chan_req_avl_tree);
	avl_destroy(&chanp->chan_acpt_avl_tree);

	mutex_exit(&chanp->chan_mutex);
	/* Free any path records cached from route resolution. */
	if (idp->route.path_rec) {
		kmem_free(idp->route.path_rec,
		    sizeof (struct ib_sa_path_rec) * idp->route.num_paths);
		idp->route.path_rec = NULL;
	}

	/* Transport-specific teardown (IB, and iWARP when built in). */
	switch (chanp->chan_xport_type) {
	case SOL_CMA_XPORT_NONE :
		break;
	case SOL_CMA_XPORT_IB :
		rdma_ib_destroy_id(idp);
		break;
#ifdef	IWARP_SUPPORT
	case SOL_CMA_XPORT_IWARP :
		rdma_iw_destroy_id(idp);
		break;
#endif	/* IWARP_SUPPORT */
	default :
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "cma_destroy_id: Unsupported xport type %x",
		    chanp->chan_xport_type);
		break;
	}

	/*
	 * Flush out & Free all listeners wrt to this ID.
	 * No locking is required as this code is executed only after
	 * all REQ CMIDs have been destroyed. listen_list
	 * will therefore not be modified during this loop.
	 */
	if (chanp->chan_listenp) {
		cma_free_listen_list(idp);
		cma_fini_listen_root(chanp);
		kmem_free((void *)chanp->chan_listenp,
		    sizeof (sol_cma_listen_info_t));
		chanp->chan_listenp = NULL;
	}

	/*
	 * If this CMID hangs off a listening root CMID, and that root
	 * has a deferred destroy pending with no remaining REQ or
	 * accepted children, complete (or signal) the root's destroy.
	 */
	if (chanp->listen_root) {
		struct rdma_cm_id	*root_idp;
		sol_cma_chan_t		*root_chanp;

		root_idp = chanp->listen_root;
		root_chanp = (sol_cma_chan_t *)root_idp;
		mutex_enter(&root_chanp->chan_mutex);
		state = cma_get_chan_state(root_chanp);
		acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
		req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
		mutex_exit(&root_chanp->chan_mutex);
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)-"
		    " root idp %p, state %x, acpt_nodes %ld, req_nodes %ld",
		    idp, root_idp, state, acpt_nodes, req_nodes);

		if (state == SOL_CMA_CHAN_DESTROY_PENDING &&
		    req_nodes == 0UL && acpt_nodes == 0UL) {
			/* Recurse to finish the root's deferred destroy. */
			mutex_enter(&root_chanp->chan_mutex);
			root_chanp->chan_req_state = REQ_CMID_NONE;
			mutex_exit(&root_chanp->chan_mutex);
			cma_destroy_id(root_idp);
		} else if (state == SOL_CMA_CHAN_DESTROY_WAIT &&
		    req_nodes == 0UL && acpt_nodes == 0UL) {
			/*
			 * A thread is waiting in rdma_destroy_id() for this
			 * root; move it to destroy-pending and wake it up.
			 */
			mutex_enter(&root_chanp->chan_mutex);
			cma_set_chan_state(root_chanp,
			    SOL_CMA_CHAN_DESTROY_PENDING);
			root_chanp->chan_req_state = REQ_CMID_NONE;
			cv_broadcast(&root_chanp->chan_destroy_cv);
			mutex_exit(&root_chanp->chan_mutex);
		}
	}

	/* Finally release this channel's own synchronization and memory. */
	mutex_destroy(&chanp->chan_mutex);
	cv_destroy(&chanp->chan_destroy_cv);
	kmem_free(chanp, sizeof (sol_cma_chan_t));
}
2023 
2024 /*
2025  * Server TCP disconnect for an established channel.
2026  *	Remove from EST AVL tree.
2027  *
2028  *	If destroy_id() has been called for the listening
2029  *	CMID and there are no more CMIDs with pending
2030  *	events corresponding to the listening CMID, free
2031  *	the listening CMID.
2032  *
2033  *
2034  *	If not chan_connect_flag is set to CONNECT_NONE
2035  *	so it can be deleted when rdma_destroy_id is
2036  *	called.
2037  */
static void
cma_handle_nomore_events(sol_cma_chan_t *chanp)
{
	struct rdma_cm_id	*idp, *root_idp;
	sol_cma_chan_t		*root_chanp;
	cma_chan_state_t	state;
	ulong_t			req_nodes, acpt_nodes;

	idp = (struct rdma_cm_id *)chanp;
	root_idp = CHAN_LISTEN_ROOT(chanp);
	root_chanp = (sol_cma_chan_t *)root_idp;
	/* Nothing to do for CMIDs not hanging off a listening root. */
	if (!root_chanp)
		return;

	/* Detach this CMID from its root before updating root state. */
	CHAN_LISTEN_ROOT(chanp) = NULL;
	mutex_enter(&root_chanp->chan_mutex);
	root_chanp->chan_req_total_cnt--;
	/* Last outstanding REQ gone: clear the root's REQ state. */
	if (!root_chanp->chan_req_total_cnt)
		root_chanp->chan_req_state = REQ_CMID_NONE;
	/*
	 * Remove this CMID from whichever of the root's AVL trees it is
	 * in, based on its REQ state: accepted/disconnected TCP CMIDs
	 * sit in the accept tree, created/notified ones in the REQ tree.
	 */
	if (root_idp->ps == RDMA_PS_TCP && (chanp->chan_req_state ==
	    REQ_CMID_ACCEPTED || chanp->chan_req_state ==
	    REQ_CMID_DISCONNECTED))
		avl_remove(&root_chanp->chan_acpt_avl_tree, idp);
	if (chanp->chan_req_state == REQ_CMID_CREATED ||
	    chanp->chan_req_state == REQ_CMID_NOTIFIED)
		avl_remove(&root_chanp->chan_req_avl_tree, idp);
	state = cma_get_chan_state(root_chanp);
	req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
	acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
	mutex_exit(&root_chanp->chan_mutex);
	/*
	 * If the root has a deferred destroy pending and this was its
	 * last child CMID, complete the root's destroy now.
	 */
	if (state == SOL_CMA_CHAN_DESTROY_PENDING && req_nodes == 0UL &&
	    acpt_nodes == 0UL)
		cma_destroy_id(root_idp);
}
2072