1*b494511aSVenki Rajagopalan /*
2*b494511aSVenki Rajagopalan  * CDDL HEADER START
3*b494511aSVenki Rajagopalan  *
4*b494511aSVenki Rajagopalan  * The contents of this file are subject to the terms of the
5*b494511aSVenki Rajagopalan  * Common Development and Distribution License (the "License").
6*b494511aSVenki Rajagopalan  * You may not use this file except in compliance with the License.
7*b494511aSVenki Rajagopalan  *
8*b494511aSVenki Rajagopalan  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*b494511aSVenki Rajagopalan  * or http://www.opensolaris.org/os/licensing.
10*b494511aSVenki Rajagopalan  * See the License for the specific language governing permissions
11*b494511aSVenki Rajagopalan  * and limitations under the License.
12*b494511aSVenki Rajagopalan  *
13*b494511aSVenki Rajagopalan  * When distributing Covered Code, include this CDDL HEADER in each
14*b494511aSVenki Rajagopalan  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*b494511aSVenki Rajagopalan  * If applicable, add the following below this CDDL HEADER, with the
16*b494511aSVenki Rajagopalan  * fields enclosed by brackets "[]" replaced with your own identifying
17*b494511aSVenki Rajagopalan  * information: Portions Copyright [yyyy] [name of copyright owner]
18*b494511aSVenki Rajagopalan  *
19*b494511aSVenki Rajagopalan  * CDDL HEADER END
20*b494511aSVenki Rajagopalan  */
21*b494511aSVenki Rajagopalan 
22*b494511aSVenki Rajagopalan /*
23*b494511aSVenki Rajagopalan  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24*b494511aSVenki Rajagopalan  */
25*b494511aSVenki Rajagopalan 
26*b494511aSVenki Rajagopalan #include <sys/types.h>
27*b494511aSVenki Rajagopalan #include <sys/kmem.h>
28*b494511aSVenki Rajagopalan #include <sys/conf.h>
29*b494511aSVenki Rajagopalan #include <sys/ddi.h>
30*b494511aSVenki Rajagopalan #include <sys/sunddi.h>
31*b494511aSVenki Rajagopalan #include <sys/ksynch.h>
32*b494511aSVenki Rajagopalan 
33*b494511aSVenki Rajagopalan #include <sys/ib/clients/eoib/enx_impl.h>
34*b494511aSVenki Rajagopalan 
/*
 * Module (static) info passed to IBTL during ibt_attach; identifies
 * this driver as a generic IBTI client and registers its async
 * event handler.
 */
static ibt_clnt_modinfo_t eibnx_clnt_modinfo = {
	IBTI_V_CURR,		/* IBTI interface version */
	IBT_GENERIC,		/* generic (non-protocol-specific) client */
	eibnx_async_handler,	/* async event callback */
	NULL,			/* no client-private modinfo argument */
	"EoIB Nexus"		/* client name, for diagnostics */
};
45*b494511aSVenki Rajagopalan 
/*
 * Well-known multicast GIDs: solicit is used to send FIP solicitations
 * to the gateways, advertise to receive gateway advertisements.  Only
 * the gid prefixes are non-zero; both are filled in by
 * eibnx_state_init().
 */
ib_gid_t enx_advertise_mgid;
ib_gid_t enx_solicit_mgid;
48*b494511aSVenki Rajagopalan 
/*
 * Static function declarations.  The eibnx_rb_*() routines are the
 * rollback counterparts of the corresponding setup routines.
 */
static int eibnx_state_init(void);
static int eibnx_setup_txbufs(eibnx_thr_info_t *);
static int eibnx_setup_rxbufs(eibnx_thr_info_t *);
static int eibnx_join_solicit_mcg(eibnx_thr_info_t *);
static int eibnx_join_advertise_mcg(eibnx_thr_info_t *);
static int eibnx_rb_ibt_init(eibnx_t *);
static void eibnx_rb_state_init(void);
static void eibnx_rb_setup_txbufs(eibnx_thr_info_t *);
static void eibnx_rb_setup_rxbufs(eibnx_thr_info_t *);
static void eibnx_rb_join_solicit_mcg(eibnx_thr_info_t *);
static void eibnx_rb_join_advertise_mcg(eibnx_thr_info_t *);
63*b494511aSVenki Rajagopalan 
64*b494511aSVenki Rajagopalan /*
65*b494511aSVenki Rajagopalan  * eibnx_ibt_init() is expected to be called during the nexus driver's
66*b494511aSVenki Rajagopalan  * attach time; given that there is only one instance of the nexus
67*b494511aSVenki Rajagopalan  * driver allowed, and no threads are active before the initialization
68*b494511aSVenki Rajagopalan  * is complete, we don't really have to acquire any driver specific mutex
69*b494511aSVenki Rajagopalan  * within this routine.
70*b494511aSVenki Rajagopalan  */
71*b494511aSVenki Rajagopalan int
eibnx_ibt_init(eibnx_t * ss)72*b494511aSVenki Rajagopalan eibnx_ibt_init(eibnx_t *ss)
73*b494511aSVenki Rajagopalan {
74*b494511aSVenki Rajagopalan 	eibnx_hca_t *hca_list;
75*b494511aSVenki Rajagopalan 	eibnx_hca_t *hca_tail;
76*b494511aSVenki Rajagopalan 	eibnx_hca_t *hca;
77*b494511aSVenki Rajagopalan 	uint_t num_hcas;
78*b494511aSVenki Rajagopalan 	ib_guid_t *hca_guids;
79*b494511aSVenki Rajagopalan 	ibt_status_t ret;
80*b494511aSVenki Rajagopalan 	int i;
81*b494511aSVenki Rajagopalan 
82*b494511aSVenki Rajagopalan 	/*
83*b494511aSVenki Rajagopalan 	 * Do per-state initialization
84*b494511aSVenki Rajagopalan 	 */
85*b494511aSVenki Rajagopalan 	(void) eibnx_state_init();
86*b494511aSVenki Rajagopalan 
87*b494511aSVenki Rajagopalan 	/*
88*b494511aSVenki Rajagopalan 	 * Attach to IBTL
89*b494511aSVenki Rajagopalan 	 */
90*b494511aSVenki Rajagopalan 	if ((ret = ibt_attach(&eibnx_clnt_modinfo, ss->nx_dip, ss,
91*b494511aSVenki Rajagopalan 	    &ss->nx_ibt_hdl)) != IBT_SUCCESS) {
92*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_attach() failed, ret=%d", ret);
93*b494511aSVenki Rajagopalan 		eibnx_rb_state_init();
94*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
95*b494511aSVenki Rajagopalan 	}
96*b494511aSVenki Rajagopalan 
97*b494511aSVenki Rajagopalan 	/*
98*b494511aSVenki Rajagopalan 	 * Get the list of HCA guids on the system
99*b494511aSVenki Rajagopalan 	 */
100*b494511aSVenki Rajagopalan 	if ((num_hcas = ibt_get_hca_list(&hca_guids)) == 0) {
101*b494511aSVenki Rajagopalan 		ENX_DPRINTF_VERBOSE("no HCAs found on the system");
102*b494511aSVenki Rajagopalan 		if ((ret = ibt_detach(ss->nx_ibt_hdl)) != IBT_SUCCESS) {
103*b494511aSVenki Rajagopalan 			ENX_DPRINTF_ERR("ibt_detach() failed, ret=%d", ret);
104*b494511aSVenki Rajagopalan 		}
105*b494511aSVenki Rajagopalan 		ss->nx_ibt_hdl = NULL;
106*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
107*b494511aSVenki Rajagopalan 	}
108*b494511aSVenki Rajagopalan 
109*b494511aSVenki Rajagopalan 	/*
110*b494511aSVenki Rajagopalan 	 * Open the HCAs and store the handles
111*b494511aSVenki Rajagopalan 	 */
112*b494511aSVenki Rajagopalan 	hca_list = hca_tail = NULL;
113*b494511aSVenki Rajagopalan 	for (i = 0; i < num_hcas; i++) {
114*b494511aSVenki Rajagopalan 		/*
115*b494511aSVenki Rajagopalan 		 * If we cannot open a HCA, allocate a protection domain
116*b494511aSVenki Rajagopalan 		 * on it or get portinfo on it, print an error and move on
117*b494511aSVenki Rajagopalan 		 * to the next HCA.  Otherwise, queue it up in our hca list
118*b494511aSVenki Rajagopalan 		 */
119*b494511aSVenki Rajagopalan 		if ((hca = eibnx_prepare_hca(hca_guids[i])) == NULL)
120*b494511aSVenki Rajagopalan 			continue;
121*b494511aSVenki Rajagopalan 
122*b494511aSVenki Rajagopalan 		if (hca_tail) {
123*b494511aSVenki Rajagopalan 			hca_tail->hc_next = hca;
124*b494511aSVenki Rajagopalan 		} else {
125*b494511aSVenki Rajagopalan 			hca_list = hca;
126*b494511aSVenki Rajagopalan 		}
127*b494511aSVenki Rajagopalan 		hca_tail = hca;
128*b494511aSVenki Rajagopalan 	}
129*b494511aSVenki Rajagopalan 
130*b494511aSVenki Rajagopalan 	/*
131*b494511aSVenki Rajagopalan 	 * Free the HCA guid list we've allocated via ibt_get_hca_list()
132*b494511aSVenki Rajagopalan 	 */
133*b494511aSVenki Rajagopalan 	ibt_free_hca_list(hca_guids, num_hcas);
134*b494511aSVenki Rajagopalan 
135*b494511aSVenki Rajagopalan 	/*
136*b494511aSVenki Rajagopalan 	 * Put the hca list in the state structure
137*b494511aSVenki Rajagopalan 	 */
138*b494511aSVenki Rajagopalan 	mutex_enter(&ss->nx_lock);
139*b494511aSVenki Rajagopalan 	ss->nx_hca = hca_list;
140*b494511aSVenki Rajagopalan 	mutex_exit(&ss->nx_lock);
141*b494511aSVenki Rajagopalan 
142*b494511aSVenki Rajagopalan 	/*
143*b494511aSVenki Rajagopalan 	 * Register for subnet notices
144*b494511aSVenki Rajagopalan 	 */
145*b494511aSVenki Rajagopalan 	ibt_register_subnet_notices(ss->nx_ibt_hdl,
146*b494511aSVenki Rajagopalan 	    eibnx_subnet_notices_handler, ss);
147*b494511aSVenki Rajagopalan 
148*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
149*b494511aSVenki Rajagopalan }
150*b494511aSVenki Rajagopalan 
151*b494511aSVenki Rajagopalan static int
eibnx_state_init(void)152*b494511aSVenki Rajagopalan eibnx_state_init(void)
153*b494511aSVenki Rajagopalan {
154*b494511aSVenki Rajagopalan 	eibnx_t *ss = enx_global_ss;
155*b494511aSVenki Rajagopalan 	kthread_t *kt;
156*b494511aSVenki Rajagopalan 
157*b494511aSVenki Rajagopalan 	/*
158*b494511aSVenki Rajagopalan 	 * Initialize synchronization primitives
159*b494511aSVenki Rajagopalan 	 */
160*b494511aSVenki Rajagopalan 	mutex_init(&ss->nx_lock, NULL, MUTEX_DRIVER, NULL);
161*b494511aSVenki Rajagopalan 	mutex_init(&ss->nx_nodeq_lock, NULL, MUTEX_DRIVER, NULL);
162*b494511aSVenki Rajagopalan 	cv_init(&ss->nx_nodeq_cv, NULL, CV_DEFAULT, NULL);
163*b494511aSVenki Rajagopalan 	mutex_init(&ss->nx_busop_lock, NULL, MUTEX_DRIVER, NULL);
164*b494511aSVenki Rajagopalan 	cv_init(&ss->nx_busop_cv, NULL, CV_DEFAULT, NULL);
165*b494511aSVenki Rajagopalan 
166*b494511aSVenki Rajagopalan 	/*
167*b494511aSVenki Rajagopalan 	 * Initialize well-known mgids: there must be a better way to
168*b494511aSVenki Rajagopalan 	 * do this instead of having to express every single gid as a
169*b494511aSVenki Rajagopalan 	 * tuple of two 8-byte integer quantities.
170*b494511aSVenki Rajagopalan 	 */
171*b494511aSVenki Rajagopalan 	enx_solicit_mgid.gid_prefix = EIB_GUID_SOLICIT_PREFIX;
172*b494511aSVenki Rajagopalan 	enx_solicit_mgid.gid_guid = 0;
173*b494511aSVenki Rajagopalan 	enx_advertise_mgid.gid_prefix = EIB_GUID_ADVERTISE_PREFIX;
174*b494511aSVenki Rajagopalan 	enx_advertise_mgid.gid_guid = 0;
175*b494511aSVenki Rajagopalan 
176*b494511aSVenki Rajagopalan 	/*
177*b494511aSVenki Rajagopalan 	 * Start up the eoib node creation thread
178*b494511aSVenki Rajagopalan 	 */
179*b494511aSVenki Rajagopalan 	kt = thread_create(NULL, 0, eibnx_create_eoib_node, NULL, 0,
180*b494511aSVenki Rajagopalan 	    &p0, TS_RUN, minclsyspri);
181*b494511aSVenki Rajagopalan 	ss->nx_nodeq_kt_did = kt->t_did;
182*b494511aSVenki Rajagopalan 
183*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
184*b494511aSVenki Rajagopalan }
185*b494511aSVenki Rajagopalan 
/*
 * Locate the two multicast groups: the All-EoIB-GWs-GID and
 * All-EoIB-ENodes-GID.  Make sure the MTU is something that
 * we can work with and Qkey is as expected.
 *
 * On success, info->ti_solicit_mcg and info->ti_advertise_mcg hold
 * the mcg info (to be released via ibt_free_mcg_info()) and
 * ENX_MCGS_FOUND is set in ti_mcg_status.  On failure, any mcg info
 * obtained along the way is freed and ENX_E_FAILURE is returned.
 * The whole lookup runs under ti_mcg_lock, and is idempotent: a
 * second call after a successful one returns immediately.
 */
int
eibnx_find_mgroups(eibnx_thr_info_t *info)
{
	ibt_hca_portinfo_t *pi = info->ti_pi;
	ibt_mcg_attr_t mcg_attr;
	ib_gid_t rgid;
	ibt_status_t ret;
	uint_t entries;

	mutex_enter(&info->ti_mcg_lock);

	/*
	 * Nothing to do if a previous call already found the mcgs
	 */
	if ((info->ti_mcg_status & ENX_MCGS_FOUND) == ENX_MCGS_FOUND) {
		mutex_exit(&info->ti_mcg_lock);
		return (ENX_E_SUCCESS);
	}

	/*
	 * Request GID defining this port (sgid table entry 0)
	 */
	rgid = pi->p_sgid_tbl[0];

	/*
	 * First, locate the multicast group to use for sending solicit
	 * requests to the GW
	 */
	bzero(&mcg_attr, sizeof (ibt_mcg_attr_t));
	mcg_attr.mc_mgid = enx_solicit_mgid;
	mcg_attr.mc_pkey = (ib_pkey_t)EIB_ADMIN_PKEY;
	mcg_attr.mc_qkey = (ib_qkey_t)EIB_FIP_QKEY;

	if ((ret = ibt_query_mcg(rgid, &mcg_attr, 1, &info->ti_solicit_mcg,
	    &entries)) != IBT_SUCCESS) {
		ENX_DPRINTF_WARN("solicit mcg (gid=%llx.%llx) not found, "
		    "ibt_query_mcg() returned %d", enx_solicit_mgid.gid_prefix,
		    enx_solicit_mgid.gid_guid, ret);
		goto find_mgroups_fail;
	}

	/*
	 * Make sure the multicast mtu isn't bigger than the port mtu
	 * and the multicast group's qkey is the same as EIB_FIP_QKEY.
	 */
	if (info->ti_solicit_mcg->mc_mtu > pi->p_mtu) {
		ENX_DPRINTF_WARN("solicit mcg (gid=%llx.%llx) mtu too big, "
		    "0x%x > 0x%x", enx_solicit_mgid.gid_prefix,
		    enx_solicit_mgid.gid_guid, info->ti_solicit_mcg->mc_mtu,
		    pi->p_mtu);
		goto find_mgroups_fail;
	}
	if (info->ti_solicit_mcg->mc_qkey != EIB_FIP_QKEY) {
		ENX_DPRINTF_WARN("solicit mcg (gid=%llx.%llx) qkey bad, "
		    "actual=0x%x, expected=0x%x", enx_solicit_mgid.gid_prefix,
		    enx_solicit_mgid.gid_guid, info->ti_solicit_mcg->mc_qkey,
		    EIB_FIP_QKEY);
		goto find_mgroups_fail;
	}

	/*
	 * Now, locate the multicast group for receiving discover
	 * advertisements from the GW
	 */
	bzero(&mcg_attr, sizeof (ibt_mcg_attr_t));
	mcg_attr.mc_mgid = enx_advertise_mgid;
	mcg_attr.mc_pkey = (ib_pkey_t)EIB_ADMIN_PKEY;
	mcg_attr.mc_qkey = (ib_qkey_t)EIB_FIP_QKEY;

	if ((ret = ibt_query_mcg(rgid, &mcg_attr, 1, &info->ti_advertise_mcg,
	    &entries)) != IBT_SUCCESS) {
		ENX_DPRINTF_WARN("advertise mcg (gid=%llx.%llx) not found, "
		    "ibt_query_mcg() returned %d",
		    enx_advertise_mgid.gid_prefix,
		    enx_advertise_mgid.gid_guid, ret);
		goto find_mgroups_fail;
	}

	/*
	 * Verify the multicast group's mtu and qkey as before
	 */
	if (info->ti_advertise_mcg->mc_mtu > pi->p_mtu) {
		ENX_DPRINTF_WARN("advertise mcg (gid=%llx.%llx) mtu too big, "
		    "0x%x > 0x%x", enx_advertise_mgid.gid_prefix,
		    enx_advertise_mgid.gid_guid,
		    info->ti_advertise_mcg->mc_mtu, pi->p_mtu);
		goto find_mgroups_fail;
	}
	if (info->ti_advertise_mcg->mc_qkey != EIB_FIP_QKEY) {
		ENX_DPRINTF_WARN("advertise mcg (gid=%llx.%llx) qkey bad, "
		    "actual=0x%x, expected=0x%x",
		    enx_advertise_mgid.gid_prefix, enx_advertise_mgid.gid_guid,
		    info->ti_advertise_mcg->mc_qkey, EIB_FIP_QKEY);
		goto find_mgroups_fail;
	}

	info->ti_mcg_status |= ENX_MCGS_FOUND;
	mutex_exit(&info->ti_mcg_lock);

	return (ENX_E_SUCCESS);

find_mgroups_fail:
	/*
	 * Free whichever mcg info was obtained before the failure.
	 * NOTE(review): the non-NULL checks assume ti_advertise_mcg and
	 * ti_solicit_mcg start out zeroed in the thr_info structure --
	 * confirm against the allocator of eibnx_thr_info_t.
	 */
	if (info->ti_advertise_mcg) {
		ibt_free_mcg_info(info->ti_advertise_mcg, 1);
		info->ti_advertise_mcg = NULL;
	}
	if (info->ti_solicit_mcg) {
		ibt_free_mcg_info(info->ti_solicit_mcg, 1);
		info->ti_solicit_mcg = NULL;
	}
	mutex_exit(&info->ti_mcg_lock);

	return (ENX_E_FAILURE);
}
302*b494511aSVenki Rajagopalan 
303*b494511aSVenki Rajagopalan /*
304*b494511aSVenki Rajagopalan  * Allocate and setup a single completion queue for tx and rx
305*b494511aSVenki Rajagopalan  */
306*b494511aSVenki Rajagopalan int
eibnx_setup_cq(eibnx_thr_info_t * info)307*b494511aSVenki Rajagopalan eibnx_setup_cq(eibnx_thr_info_t *info)
308*b494511aSVenki Rajagopalan {
309*b494511aSVenki Rajagopalan 	ibt_hca_attr_t hca_attr;
310*b494511aSVenki Rajagopalan 	ibt_cq_attr_t cq_attr;
311*b494511aSVenki Rajagopalan 	ibt_status_t ret;
312*b494511aSVenki Rajagopalan 	uint_t sz;
313*b494511aSVenki Rajagopalan 
314*b494511aSVenki Rajagopalan 	/*
315*b494511aSVenki Rajagopalan 	 * Get this HCA's attributes
316*b494511aSVenki Rajagopalan 	 */
317*b494511aSVenki Rajagopalan 	ret = ibt_query_hca(info->ti_hca, &hca_attr);
318*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
319*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_query_hca(hca_hdl=0x%llx) failed, ret=%d",
320*b494511aSVenki Rajagopalan 		    info->ti_hca, ret);
321*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
322*b494511aSVenki Rajagopalan 	}
323*b494511aSVenki Rajagopalan 
324*b494511aSVenki Rajagopalan 	/*
325*b494511aSVenki Rajagopalan 	 * Allocate a completion queue for our sends and receives
326*b494511aSVenki Rajagopalan 	 */
327*b494511aSVenki Rajagopalan 	cq_attr.cq_sched = NULL;
328*b494511aSVenki Rajagopalan 	cq_attr.cq_flags = IBT_CQ_NO_FLAGS;
329*b494511aSVenki Rajagopalan 	cq_attr.cq_size = (hca_attr.hca_max_cq_sz < ENX_CQ_SIZE) ?
330*b494511aSVenki Rajagopalan 	    hca_attr.hca_max_cq_sz : ENX_CQ_SIZE;
331*b494511aSVenki Rajagopalan 
332*b494511aSVenki Rajagopalan 	ret = ibt_alloc_cq(info->ti_hca, &cq_attr, &info->ti_cq_hdl, &sz);
333*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
334*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_alloc_cq(hca_hdl=0x%llx, cq_sz=0x%lx) "
335*b494511aSVenki Rajagopalan 		    "failed, ret=%d", info->ti_hca, cq_attr.cq_size, ret);
336*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
337*b494511aSVenki Rajagopalan 	}
338*b494511aSVenki Rajagopalan 
339*b494511aSVenki Rajagopalan 	/*
340*b494511aSVenki Rajagopalan 	 * Set up other parameters for collecting completion information
341*b494511aSVenki Rajagopalan 	 */
342*b494511aSVenki Rajagopalan 	info->ti_cq_sz = sz;
343*b494511aSVenki Rajagopalan 	info->ti_wc = kmem_zalloc(sizeof (ibt_wc_t) * sz, KM_SLEEP);
344*b494511aSVenki Rajagopalan 
345*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
346*b494511aSVenki Rajagopalan }
347*b494511aSVenki Rajagopalan 
/*
 * Allocate and setup the UD channel parameters.
 *
 * Builds an all-signaled UD channel on the port described by
 * info->ti_pi, using the EIB admin pkey and FIP qkey, with both send
 * and receive completions directed to the single CQ set up earlier by
 * eibnx_setup_cq().  On success, info->ti_chan holds the channel and
 * info->ti_qpn its QP number.  Returns ENX_E_SUCCESS/ENX_E_FAILURE.
 */
static int
eibnx_setup_ud_channel(eibnx_thr_info_t *info)
{
	ibt_ud_chan_alloc_args_t alloc_attr;
	ibt_ud_chan_query_attr_t query_attr;
	ibt_status_t ret;

	/*
	 * Protect against arbitrary additions to the chan_alloc_args
	 * and chan_query_attr structures (make sure the ones we don't
	 * use are zero'd).
	 */
	bzero(&alloc_attr, sizeof (ibt_ud_chan_alloc_args_t));
	bzero(&query_attr, sizeof (ibt_ud_chan_query_attr_t));

	/*
	 * This ud channel is not going to be used by the nexus driver
	 * to send any LSO packets, so we won't need the IBT_USES_LSO flag.
	 */
	alloc_attr.ud_flags = IBT_ALL_SIGNALED;
	alloc_attr.ud_hca_port_num = info->ti_pi->p_port_num;

	/*
	 * Translate the admin pkey into this port's pkey-table index
	 */
	ret = ibt_pkey2index(info->ti_hca, info->ti_pi->p_port_num,
	    (ib_pkey_t)EIB_ADMIN_PKEY, &(alloc_attr.ud_pkey_ix));
	if (ret != IBT_SUCCESS) {
		ENX_DPRINTF_ERR("ibt_pkey2index(hca_hdl=0x%llx, "
		    "port_num=0x%x, pkey=0x%x) failed, ret=%d",
		    info->ti_hca, info->ti_pi->p_port_num,
		    EIB_ADMIN_PKEY, ret);
		return (ENX_E_FAILURE);
	}

	/*
	 * Queue sizes match the number of send/recv wqes we set up
	 * elsewhere; a single sgl entry per wqe, no inline data
	 */
	alloc_attr.ud_sizes.cs_sq = ENX_NUM_SWQE;
	alloc_attr.ud_sizes.cs_rq = ENX_NUM_RWQE;
	alloc_attr.ud_sizes.cs_sq_sgl = 1;
	alloc_attr.ud_sizes.cs_rq_sgl = 1;
	alloc_attr.ud_sizes.cs_inline = 0;

	/*
	 * Both completion queues point at the shared CQ from
	 * eibnx_setup_cq()
	 */
	alloc_attr.ud_qkey = EIB_FIP_QKEY;
	alloc_attr.ud_scq = info->ti_cq_hdl;
	alloc_attr.ud_rcq = info->ti_cq_hdl;
	alloc_attr.ud_pd = info->ti_pd;

	ret = ibt_alloc_ud_channel(info->ti_hca, IBT_ACHAN_NO_FLAGS,
	    &alloc_attr, &info->ti_chan, NULL);
	if (ret != IBT_SUCCESS) {
		ENX_DPRINTF_ERR("ibt_alloc_ud_channel(hca_hdl=0x%llx, "
		    "cs_sq=0x%lx, cs_rq=0x%lx) failed, ret=%d",
		    info->ti_hca, alloc_attr.ud_sizes.cs_sq,
		    alloc_attr.ud_sizes.cs_rq, ret);
		return (ENX_E_FAILURE);
	}

	/*
	 * Query the channel back to discover the QP number assigned to
	 * it; on failure, free the just-allocated channel before
	 * bailing out
	 */
	ret = ibt_query_ud_channel(info->ti_chan, &query_attr);
	if (ret != IBT_SUCCESS) {
		ENX_DPRINTF_ERR("ibt_query_ud_channel(chan_hdl=0x%llx) "
		    "failed, ret=%d", info->ti_chan, ret);
		if ((ret = ibt_free_channel(info->ti_chan)) != IBT_SUCCESS) {
			ENX_DPRINTF_WARN("ibt_free_channel(chan_hdl=0x%llx) "
			    "failed, ret=%d", info->ti_chan, ret);
		}
		info->ti_chan = NULL;
		return (ENX_E_FAILURE);
	}
	info->ti_qpn = query_attr.ud_qpn;

	return (ENX_E_SUCCESS);
}
419*b494511aSVenki Rajagopalan 
/*
 * Set up the transmit buffers for communicating with the gateway. Since
 * the EoIB Nexus driver only exchanges control messages with the
 * gateway, we don't really need too much space.
 *
 * One contiguous, zeroed buffer of ENX_NUM_SWQE port-MTU-sized slots
 * is allocated and registered with the HCA; each send wqe gets its own
 * slot, sgl and UD destination handle.  Returns ENX_E_SUCCESS, or
 * ENX_E_FAILURE after rolling back whatever was set up.
 */
static int
eibnx_setup_txbufs(eibnx_thr_info_t *info)
{
	eibnx_tx_t *snd_p = &info->ti_snd;
	eibnx_wqe_t *swqe;
	ibt_mr_attr_t attr;
	ibt_mr_desc_t desc;
	ib_memlen_t tx_bufsz;
	ibt_status_t ret;
	ibt_ud_dest_hdl_t dest;
	uint8_t	*buf;
	uint_t mtu = (128 << info->ti_pi->p_mtu);	/* IB enum -> bytes */
	int i;

	/*
	 * Allocate for the tx buf
	 */
	tx_bufsz = ENX_NUM_SWQE * mtu;
	snd_p->tx_vaddr = (ib_vaddr_t)(uintptr_t)kmem_zalloc(tx_bufsz,
	    KM_SLEEP);

	/*
	 * Register the memory region with IBTF for use
	 */
	attr.mr_vaddr = snd_p->tx_vaddr;
	attr.mr_len = tx_bufsz;
	attr.mr_as = NULL;
	attr.mr_flags = IBT_MR_SLEEP;
	if ((ret = ibt_register_mr(info->ti_hca, info->ti_pd, &attr,
	    &snd_p->tx_mr, &desc)) != IBT_SUCCESS) {
		ENX_DPRINTF_ERR("ibt_register_mr() failed for tx "
		    "region (0x%llx, 0x%llx) with ret=%d",
		    attr.mr_vaddr, attr.mr_len, ret);
		kmem_free((void *)(uintptr_t)(snd_p->tx_vaddr), tx_bufsz);
		return (ENX_E_FAILURE);
	}
	snd_p->tx_lkey = desc.md_lkey;

	/*
	 * Now setup the send wqes
	 */
	buf = (uint8_t *)(uintptr_t)(snd_p->tx_vaddr);
	for (i = 0; i < ENX_NUM_SWQE; i++) {
		swqe = &snd_p->tx_wqe[i];

		/*
		 * Allocate a UD destination handle
		 */
		ret = ibt_alloc_ud_dest(info->ti_hca, IBT_UD_DEST_NO_FLAGS,
		    info->ti_pd, &dest);
		if (ret != IBT_SUCCESS) {
			ENX_DPRINTF_ERR("ibt_alloc_ud_dest(hca_hdl=0x%llx) "
			    "failed, ret=%d", info->ti_hca, ret);
			/*
			 * NOTE(review): on a mid-loop failure, wqes at
			 * index >= i are still uninitialized (qe_flags
			 * zero); eibnx_rb_setup_txbufs() is presumably
			 * written to cope with that -- confirm.
			 */
			eibnx_rb_setup_txbufs(info);
			return (ENX_E_FAILURE);
		}

		/*
		 * We set up everything in the send wqes except initialize
		 * the UD destination and the state of the entry. The ds_len
		 * should also be adjusted correctly. All this should be
		 * done later in the appropriate routines, before posting.
		 */
		swqe->qe_type = ENX_QETYP_SWQE;
		swqe->qe_bufsz = mtu;
		swqe->qe_sgl.ds_va = (ib_vaddr_t)(uintptr_t)buf;
		swqe->qe_sgl.ds_key = snd_p->tx_lkey;
		swqe->qe_sgl.ds_len = swqe->qe_bufsz;
		/* wr_id carries the wqe pointer so completions map back */
		swqe->qe_wr.send.wr_id = (ibt_wrid_t)(uintptr_t)swqe;
		swqe->qe_wr.send.wr_flags = IBT_WR_NO_FLAGS;
		swqe->qe_wr.send.wr_trans = IBT_UD_SRV;
		swqe->qe_wr.send.wr_opcode = IBT_WRC_SEND;
		swqe->qe_wr.send.wr_nds = 1;
		swqe->qe_wr.send.wr_sgl = &swqe->qe_sgl;
		swqe->qe_wr.send.wr.ud.udwr_dest = dest;

		mutex_init(&swqe->qe_lock, NULL, MUTEX_DRIVER, NULL);
		swqe->qe_flags = 0;

		buf += mtu;
	}

	return (ENX_E_SUCCESS);
}
509*b494511aSVenki Rajagopalan 
510*b494511aSVenki Rajagopalan /*
511*b494511aSVenki Rajagopalan  * Set up bufs for receiving gateway advertisements
512*b494511aSVenki Rajagopalan  */
513*b494511aSVenki Rajagopalan static int
eibnx_setup_rxbufs(eibnx_thr_info_t * info)514*b494511aSVenki Rajagopalan eibnx_setup_rxbufs(eibnx_thr_info_t *info)
515*b494511aSVenki Rajagopalan {
516*b494511aSVenki Rajagopalan 	eibnx_rx_t *rcv_p = &info->ti_rcv;
517*b494511aSVenki Rajagopalan 	eibnx_wqe_t *rwqe;
518*b494511aSVenki Rajagopalan 	ibt_mr_attr_t attr;
519*b494511aSVenki Rajagopalan 	ibt_mr_desc_t desc;
520*b494511aSVenki Rajagopalan 	ib_memlen_t rx_bufsz;
521*b494511aSVenki Rajagopalan 	ibt_status_t ret;
522*b494511aSVenki Rajagopalan 	uint8_t	*buf;
523*b494511aSVenki Rajagopalan 	uint_t mtu = (128 << info->ti_pi->p_mtu);
524*b494511aSVenki Rajagopalan 	int i;
525*b494511aSVenki Rajagopalan 
526*b494511aSVenki Rajagopalan 	/*
527*b494511aSVenki Rajagopalan 	 * Allocate for the rx buf
528*b494511aSVenki Rajagopalan 	 */
529*b494511aSVenki Rajagopalan 	rx_bufsz = ENX_NUM_RWQE * (mtu + ENX_GRH_SZ);
530*b494511aSVenki Rajagopalan 	rcv_p->rx_vaddr = (ib_vaddr_t)(uintptr_t)kmem_zalloc(rx_bufsz,
531*b494511aSVenki Rajagopalan 	    KM_SLEEP);
532*b494511aSVenki Rajagopalan 
533*b494511aSVenki Rajagopalan 	attr.mr_vaddr = rcv_p->rx_vaddr;
534*b494511aSVenki Rajagopalan 	attr.mr_len = rx_bufsz;
535*b494511aSVenki Rajagopalan 	attr.mr_as = NULL;
536*b494511aSVenki Rajagopalan 	attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
537*b494511aSVenki Rajagopalan 	if ((ret = ibt_register_mr(info->ti_hca, info->ti_pd, &attr,
538*b494511aSVenki Rajagopalan 	    &rcv_p->rx_mr, &desc)) != IBT_SUCCESS) {
539*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_register_mr() failed for rx "
540*b494511aSVenki Rajagopalan 		    "region (0x%llx, 0x%llx) with ret=%d",
541*b494511aSVenki Rajagopalan 		    attr.mr_vaddr, attr.mr_len, ret);
542*b494511aSVenki Rajagopalan 		kmem_free((void *)(uintptr_t)(rcv_p->rx_vaddr), rx_bufsz);
543*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
544*b494511aSVenki Rajagopalan 	}
545*b494511aSVenki Rajagopalan 	rcv_p->rx_lkey = desc.md_lkey;
546*b494511aSVenki Rajagopalan 
547*b494511aSVenki Rajagopalan 	buf = (uint8_t *)(uintptr_t)(rcv_p->rx_vaddr);
548*b494511aSVenki Rajagopalan 	for (i = 0; i < ENX_NUM_RWQE; i++) {
549*b494511aSVenki Rajagopalan 		rwqe = &rcv_p->rx_wqe[i];
550*b494511aSVenki Rajagopalan 
551*b494511aSVenki Rajagopalan 		rwqe->qe_type = ENX_QETYP_RWQE;
552*b494511aSVenki Rajagopalan 		rwqe->qe_bufsz = mtu + ENX_GRH_SZ;
553*b494511aSVenki Rajagopalan 		rwqe->qe_sgl.ds_va = (ib_vaddr_t)(uintptr_t)buf;
554*b494511aSVenki Rajagopalan 		rwqe->qe_sgl.ds_key = rcv_p->rx_lkey;
555*b494511aSVenki Rajagopalan 		rwqe->qe_sgl.ds_len = rwqe->qe_bufsz;
556*b494511aSVenki Rajagopalan 		rwqe->qe_wr.recv.wr_id = (ibt_wrid_t)(uintptr_t)rwqe;
557*b494511aSVenki Rajagopalan 		rwqe->qe_wr.recv.wr_nds = 1;
558*b494511aSVenki Rajagopalan 		rwqe->qe_wr.recv.wr_sgl = &rwqe->qe_sgl;
559*b494511aSVenki Rajagopalan 
560*b494511aSVenki Rajagopalan 		mutex_init(&rwqe->qe_lock, NULL, MUTEX_DRIVER, NULL);
561*b494511aSVenki Rajagopalan 		rwqe->qe_flags = 0;
562*b494511aSVenki Rajagopalan 
563*b494511aSVenki Rajagopalan 		buf += (mtu + ENX_GRH_SZ);
564*b494511aSVenki Rajagopalan 	}
565*b494511aSVenki Rajagopalan 
566*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
567*b494511aSVenki Rajagopalan }
568*b494511aSVenki Rajagopalan 
569*b494511aSVenki Rajagopalan /*
570*b494511aSVenki Rajagopalan  * Set up transmit and receive buffers and post the receive buffers
571*b494511aSVenki Rajagopalan  */
572*b494511aSVenki Rajagopalan int
eibnx_setup_bufs(eibnx_thr_info_t * info)573*b494511aSVenki Rajagopalan eibnx_setup_bufs(eibnx_thr_info_t *info)
574*b494511aSVenki Rajagopalan {
575*b494511aSVenki Rajagopalan 	eibnx_rx_t *rcv_p = &info->ti_rcv;
576*b494511aSVenki Rajagopalan 	eibnx_wqe_t *rwqe;
577*b494511aSVenki Rajagopalan 	ibt_status_t ret;
578*b494511aSVenki Rajagopalan 	int i;
579*b494511aSVenki Rajagopalan 
580*b494511aSVenki Rajagopalan 	if (eibnx_setup_txbufs(info) != ENX_E_SUCCESS)
581*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
582*b494511aSVenki Rajagopalan 
583*b494511aSVenki Rajagopalan 	if (eibnx_setup_rxbufs(info) != ENX_E_SUCCESS) {
584*b494511aSVenki Rajagopalan 		eibnx_rb_setup_txbufs(info);
585*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
586*b494511aSVenki Rajagopalan 	}
587*b494511aSVenki Rajagopalan 
588*b494511aSVenki Rajagopalan 	for (i = 0; i < ENX_NUM_RWQE; i++) {
589*b494511aSVenki Rajagopalan 		rwqe = &rcv_p->rx_wqe[i];
590*b494511aSVenki Rajagopalan 
591*b494511aSVenki Rajagopalan 		mutex_enter(&rwqe->qe_lock);
592*b494511aSVenki Rajagopalan 
593*b494511aSVenki Rajagopalan 		rwqe->qe_flags |= (ENX_QEFL_INUSE | ENX_QEFL_POSTED);
594*b494511aSVenki Rajagopalan 		ret = ibt_post_recv(info->ti_chan, &(rwqe->qe_wr.recv), 1,
595*b494511aSVenki Rajagopalan 		    NULL);
596*b494511aSVenki Rajagopalan 
597*b494511aSVenki Rajagopalan 		mutex_exit(&rwqe->qe_lock);
598*b494511aSVenki Rajagopalan 
599*b494511aSVenki Rajagopalan 		if (ret != IBT_SUCCESS) {
600*b494511aSVenki Rajagopalan 			ENX_DPRINTF_ERR("ibt_post_recv(chan_hdl=0x%llx) "
601*b494511aSVenki Rajagopalan 			    "failed, ret=%d", info->ti_chan, ret);
602*b494511aSVenki Rajagopalan 
603*b494511aSVenki Rajagopalan 			ret = ibt_flush_channel(info->ti_chan);
604*b494511aSVenki Rajagopalan 			if (ret != IBT_SUCCESS) {
605*b494511aSVenki Rajagopalan 				ENX_DPRINTF_WARN("ibt_flush_channel"
606*b494511aSVenki Rajagopalan 				    "(chan_hdl=0x%llx) failed, ret=%d",
607*b494511aSVenki Rajagopalan 				    info->ti_chan, ret);
608*b494511aSVenki Rajagopalan 			}
609*b494511aSVenki Rajagopalan 
610*b494511aSVenki Rajagopalan 			eibnx_rb_setup_rxbufs(info);
611*b494511aSVenki Rajagopalan 			eibnx_rb_setup_txbufs(info);
612*b494511aSVenki Rajagopalan 			return (ENX_E_FAILURE);
613*b494511aSVenki Rajagopalan 		}
614*b494511aSVenki Rajagopalan 	}
615*b494511aSVenki Rajagopalan 
616*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
617*b494511aSVenki Rajagopalan }
618*b494511aSVenki Rajagopalan 
619*b494511aSVenki Rajagopalan /*
620*b494511aSVenki Rajagopalan  * Set up the completion queue handler.  While we don't quit if  we cannot
621*b494511aSVenki Rajagopalan  * use soft interrupts, that path is really unreliable and untested.
622*b494511aSVenki Rajagopalan  */
623*b494511aSVenki Rajagopalan int
eibnx_setup_cq_handler(eibnx_thr_info_t * info)624*b494511aSVenki Rajagopalan eibnx_setup_cq_handler(eibnx_thr_info_t *info)
625*b494511aSVenki Rajagopalan {
626*b494511aSVenki Rajagopalan 	eibnx_t *ss = enx_global_ss;
627*b494511aSVenki Rajagopalan 	ibt_status_t ret;
628*b494511aSVenki Rajagopalan 	int rv;
629*b494511aSVenki Rajagopalan 
630*b494511aSVenki Rajagopalan 	/*
631*b494511aSVenki Rajagopalan 	 * We'll try to use a softintr if possible.  If not, it's not
632*b494511aSVenki Rajagopalan 	 * fatal, we'll try and use the completion handler directly from
633*b494511aSVenki Rajagopalan 	 * the interrupt handler.
634*b494511aSVenki Rajagopalan 	 */
635*b494511aSVenki Rajagopalan 
636*b494511aSVenki Rajagopalan 	rv = ddi_intr_add_softint(ss->nx_dip, &info->ti_softint_hdl,
637*b494511aSVenki Rajagopalan 	    EIB_SOFTPRI_ADM, eibnx_comp_handler, info);
638*b494511aSVenki Rajagopalan 	if (rv != DDI_SUCCESS) {
639*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ddi_intr_add_softint(dip=0x%llx) "
640*b494511aSVenki Rajagopalan 		    "failed, ret=%d", ss->nx_dip, rv);
641*b494511aSVenki Rajagopalan 	}
642*b494511aSVenki Rajagopalan 
643*b494511aSVenki Rajagopalan 	ibt_set_cq_handler(info->ti_cq_hdl, eibnx_comp_intr, info);
644*b494511aSVenki Rajagopalan 
645*b494511aSVenki Rajagopalan 	ret = ibt_enable_cq_notify(info->ti_cq_hdl, IBT_NEXT_COMPLETION);
646*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
647*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_enable_cq_notify(cq_hdl=0x%llx) "
648*b494511aSVenki Rajagopalan 		    "failed, ret=%d", info->ti_cq_hdl, ret);
649*b494511aSVenki Rajagopalan 		if (info->ti_softint_hdl) {
650*b494511aSVenki Rajagopalan 			(void) ddi_intr_remove_softint(info->ti_softint_hdl);
651*b494511aSVenki Rajagopalan 			info->ti_softint_hdl = NULL;
652*b494511aSVenki Rajagopalan 		}
653*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
654*b494511aSVenki Rajagopalan 	}
655*b494511aSVenki Rajagopalan 
656*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
657*b494511aSVenki Rajagopalan }
658*b494511aSVenki Rajagopalan 
659*b494511aSVenki Rajagopalan /*
660*b494511aSVenki Rajagopalan  * Join the solicit multicast group (All-EoIB-GWs-GID) as a full member
661*b494511aSVenki Rajagopalan  */
662*b494511aSVenki Rajagopalan static int
eibnx_join_solicit_mcg(eibnx_thr_info_t * info)663*b494511aSVenki Rajagopalan eibnx_join_solicit_mcg(eibnx_thr_info_t *info)
664*b494511aSVenki Rajagopalan {
665*b494511aSVenki Rajagopalan 	ib_gid_t rgid = info->ti_pi->p_sgid_tbl[0];
666*b494511aSVenki Rajagopalan 	ibt_mcg_attr_t mcg_attr;
667*b494511aSVenki Rajagopalan 	ibt_mcg_info_t mcg_info;
668*b494511aSVenki Rajagopalan 	ibt_status_t ret;
669*b494511aSVenki Rajagopalan 
670*b494511aSVenki Rajagopalan 	bzero(&mcg_attr, sizeof (ibt_mcg_attr_t));
671*b494511aSVenki Rajagopalan 
672*b494511aSVenki Rajagopalan 	mcg_attr.mc_mgid = enx_solicit_mgid;
673*b494511aSVenki Rajagopalan 	mcg_attr.mc_qkey = (ib_qkey_t)EIB_FIP_QKEY;
674*b494511aSVenki Rajagopalan 	mcg_attr.mc_pkey = (ib_pkey_t)EIB_ADMIN_PKEY;
675*b494511aSVenki Rajagopalan 	mcg_attr.mc_join_state = IB_MC_JSTATE_FULL;
676*b494511aSVenki Rajagopalan 	mcg_attr.mc_flow = info->ti_solicit_mcg->mc_adds_vect.av_flow;
677*b494511aSVenki Rajagopalan 	mcg_attr.mc_tclass = info->ti_solicit_mcg->mc_adds_vect.av_tclass;
678*b494511aSVenki Rajagopalan 	mcg_attr.mc_sl = info->ti_solicit_mcg->mc_adds_vect.av_srvl;
679*b494511aSVenki Rajagopalan 	mcg_attr.mc_scope = IB_MC_SCOPE_SUBNET_LOCAL;
680*b494511aSVenki Rajagopalan 
681*b494511aSVenki Rajagopalan 	/*
682*b494511aSVenki Rajagopalan 	 * We only need to send to solicit mcg, so we only need to join
683*b494511aSVenki Rajagopalan 	 * the multicast group, no need to attach our qp to it
684*b494511aSVenki Rajagopalan 	 */
685*b494511aSVenki Rajagopalan 	ret = ibt_join_mcg(rgid, &mcg_attr, &mcg_info, NULL, NULL);
686*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
687*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_join_mcg() failed for solicit "
688*b494511aSVenki Rajagopalan 		    "mgid=%llx.%llx, ret=%x", enx_solicit_mgid.gid_prefix,
689*b494511aSVenki Rajagopalan 		    enx_solicit_mgid.gid_guid, ret);
690*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
691*b494511aSVenki Rajagopalan 	}
692*b494511aSVenki Rajagopalan 
693*b494511aSVenki Rajagopalan 	/*
694*b494511aSVenki Rajagopalan 	 * We can throw away the old mcg info we got when we queried
695*b494511aSVenki Rajagopalan 	 * for the mcg and use the new one. They both should be the
696*b494511aSVenki Rajagopalan 	 * same, really.
697*b494511aSVenki Rajagopalan 	 */
698*b494511aSVenki Rajagopalan 	if (info->ti_solicit_mcg) {
699*b494511aSVenki Rajagopalan 		bcopy(&mcg_info, info->ti_solicit_mcg,
700*b494511aSVenki Rajagopalan 		    sizeof (ibt_mcg_info_t));
701*b494511aSVenki Rajagopalan 	}
702*b494511aSVenki Rajagopalan 
703*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
704*b494511aSVenki Rajagopalan }
705*b494511aSVenki Rajagopalan 
706*b494511aSVenki Rajagopalan /*
707*b494511aSVenki Rajagopalan  * Join and attach to the advertise multicast group (All-EoIB-ENodes-GID)
708*b494511aSVenki Rajagopalan  * to receive unsolicitied advertisements from the gateways.
709*b494511aSVenki Rajagopalan  */
710*b494511aSVenki Rajagopalan static int
eibnx_join_advertise_mcg(eibnx_thr_info_t * info)711*b494511aSVenki Rajagopalan eibnx_join_advertise_mcg(eibnx_thr_info_t *info)
712*b494511aSVenki Rajagopalan {
713*b494511aSVenki Rajagopalan 	ib_gid_t rgid = info->ti_pi->p_sgid_tbl[0];
714*b494511aSVenki Rajagopalan 	ibt_mcg_attr_t mcg_attr;
715*b494511aSVenki Rajagopalan 	ibt_mcg_info_t mcg_info;
716*b494511aSVenki Rajagopalan 	ibt_status_t ret;
717*b494511aSVenki Rajagopalan 
718*b494511aSVenki Rajagopalan 	if (info->ti_chan == NULL)
719*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
720*b494511aSVenki Rajagopalan 
721*b494511aSVenki Rajagopalan 	bzero(&mcg_attr, sizeof (ibt_mcg_attr_t));
722*b494511aSVenki Rajagopalan 
723*b494511aSVenki Rajagopalan 	mcg_attr.mc_mgid = enx_advertise_mgid;
724*b494511aSVenki Rajagopalan 	mcg_attr.mc_qkey = (ib_qkey_t)EIB_FIP_QKEY;
725*b494511aSVenki Rajagopalan 	mcg_attr.mc_pkey = (ib_pkey_t)EIB_ADMIN_PKEY;
726*b494511aSVenki Rajagopalan 	mcg_attr.mc_join_state = IB_MC_JSTATE_FULL;
727*b494511aSVenki Rajagopalan 	mcg_attr.mc_flow = info->ti_advertise_mcg->mc_adds_vect.av_flow;
728*b494511aSVenki Rajagopalan 	mcg_attr.mc_tclass = info->ti_advertise_mcg->mc_adds_vect.av_tclass;
729*b494511aSVenki Rajagopalan 	mcg_attr.mc_sl = info->ti_advertise_mcg->mc_adds_vect.av_srvl;
730*b494511aSVenki Rajagopalan 	mcg_attr.mc_scope = IB_MC_SCOPE_SUBNET_LOCAL;
731*b494511aSVenki Rajagopalan 
732*b494511aSVenki Rajagopalan 	ret = ibt_join_mcg(rgid, &mcg_attr, &mcg_info, NULL, NULL);
733*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
734*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_join_mcg() failed for advertise "
735*b494511aSVenki Rajagopalan 		    "mgid=%llx.%llx, ret=%x", enx_advertise_mgid.gid_prefix,
736*b494511aSVenki Rajagopalan 		    enx_advertise_mgid.gid_guid, ret);
737*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
738*b494511aSVenki Rajagopalan 	}
739*b494511aSVenki Rajagopalan 
740*b494511aSVenki Rajagopalan 	/*
741*b494511aSVenki Rajagopalan 	 * We can throw away the old mcg info we got when we queried
742*b494511aSVenki Rajagopalan 	 * for the mcg and use the new one. They both should be the
743*b494511aSVenki Rajagopalan 	 * same, really.
744*b494511aSVenki Rajagopalan 	 */
745*b494511aSVenki Rajagopalan 	if (info->ti_advertise_mcg) {
746*b494511aSVenki Rajagopalan 		bcopy(&mcg_info, info->ti_advertise_mcg,
747*b494511aSVenki Rajagopalan 		    sizeof (ibt_mcg_info_t));
748*b494511aSVenki Rajagopalan 	}
749*b494511aSVenki Rajagopalan 
750*b494511aSVenki Rajagopalan 	/*
751*b494511aSVenki Rajagopalan 	 * Since we need to receive advertisements, we'll attach our qp
752*b494511aSVenki Rajagopalan 	 * to the advertise mcg
753*b494511aSVenki Rajagopalan 	 */
754*b494511aSVenki Rajagopalan 	ret = ibt_attach_mcg(info->ti_chan, info->ti_advertise_mcg);
755*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
756*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_attach_mcg(chan_hdl=0x%llx, "
757*b494511aSVenki Rajagopalan 		    "advt_mcg=0x%llx) failed, ret=%d", info->ti_chan,
758*b494511aSVenki Rajagopalan 		    info->ti_advertise_mcg, ret);
759*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
760*b494511aSVenki Rajagopalan 	}
761*b494511aSVenki Rajagopalan 
762*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
763*b494511aSVenki Rajagopalan }
764*b494511aSVenki Rajagopalan 
765*b494511aSVenki Rajagopalan /*
766*b494511aSVenki Rajagopalan  * Join the multicast groups we're interested in
767*b494511aSVenki Rajagopalan  */
768*b494511aSVenki Rajagopalan int
eibnx_join_mcgs(eibnx_thr_info_t * info)769*b494511aSVenki Rajagopalan eibnx_join_mcgs(eibnx_thr_info_t *info)
770*b494511aSVenki Rajagopalan {
771*b494511aSVenki Rajagopalan 	mutex_enter(&info->ti_mcg_lock);
772*b494511aSVenki Rajagopalan 
773*b494511aSVenki Rajagopalan 	/*
774*b494511aSVenki Rajagopalan 	 * We should've located the mcg first
775*b494511aSVenki Rajagopalan 	 */
776*b494511aSVenki Rajagopalan 	if ((info->ti_mcg_status & ENX_MCGS_FOUND) == 0) {
777*b494511aSVenki Rajagopalan 		mutex_exit(&info->ti_mcg_lock);
778*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
779*b494511aSVenki Rajagopalan 	}
780*b494511aSVenki Rajagopalan 
781*b494511aSVenki Rajagopalan 	/*
782*b494511aSVenki Rajagopalan 	 * If we're already joined to the mcgs, we must leave first
783*b494511aSVenki Rajagopalan 	 */
784*b494511aSVenki Rajagopalan 	if ((info->ti_mcg_status & ENX_MCGS_JOINED) == ENX_MCGS_JOINED) {
785*b494511aSVenki Rajagopalan 		mutex_exit(&info->ti_mcg_lock);
786*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
787*b494511aSVenki Rajagopalan 	}
788*b494511aSVenki Rajagopalan 
789*b494511aSVenki Rajagopalan 	/*
790*b494511aSVenki Rajagopalan 	 * Join the two mcgs
791*b494511aSVenki Rajagopalan 	 */
792*b494511aSVenki Rajagopalan 	if (eibnx_join_advertise_mcg(info) != ENX_E_SUCCESS) {
793*b494511aSVenki Rajagopalan 		mutex_exit(&info->ti_mcg_lock);
794*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
795*b494511aSVenki Rajagopalan 	}
796*b494511aSVenki Rajagopalan 	if (eibnx_join_solicit_mcg(info) != ENX_E_SUCCESS) {
797*b494511aSVenki Rajagopalan 		eibnx_rb_join_advertise_mcg(info);
798*b494511aSVenki Rajagopalan 		mutex_exit(&info->ti_mcg_lock);
799*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
800*b494511aSVenki Rajagopalan 	}
801*b494511aSVenki Rajagopalan 
802*b494511aSVenki Rajagopalan 	info->ti_mcg_status |= ENX_MCGS_JOINED;
803*b494511aSVenki Rajagopalan 	mutex_exit(&info->ti_mcg_lock);
804*b494511aSVenki Rajagopalan 
805*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
806*b494511aSVenki Rajagopalan }
807*b494511aSVenki Rajagopalan 
808*b494511aSVenki Rajagopalan int
eibnx_rejoin_mcgs(eibnx_thr_info_t * info)809*b494511aSVenki Rajagopalan eibnx_rejoin_mcgs(eibnx_thr_info_t *info)
810*b494511aSVenki Rajagopalan {
811*b494511aSVenki Rajagopalan 	/*
812*b494511aSVenki Rajagopalan 	 * Lookup the MCGs again and join them
813*b494511aSVenki Rajagopalan 	 */
814*b494511aSVenki Rajagopalan 	eibnx_rb_join_mcgs(info);
815*b494511aSVenki Rajagopalan 	eibnx_rb_find_mgroups(info);
816*b494511aSVenki Rajagopalan 
817*b494511aSVenki Rajagopalan 	if (eibnx_find_mgroups(info) != ENX_E_SUCCESS)
818*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
819*b494511aSVenki Rajagopalan 
820*b494511aSVenki Rajagopalan 	if (eibnx_join_mcgs(info) != ENX_E_SUCCESS)
821*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
822*b494511aSVenki Rajagopalan 
823*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
824*b494511aSVenki Rajagopalan }
825*b494511aSVenki Rajagopalan 
826*b494511aSVenki Rajagopalan int
eibnx_ibt_fini(eibnx_t * ss)827*b494511aSVenki Rajagopalan eibnx_ibt_fini(eibnx_t *ss)
828*b494511aSVenki Rajagopalan {
829*b494511aSVenki Rajagopalan 	return (eibnx_rb_ibt_init(ss));
830*b494511aSVenki Rajagopalan }
831*b494511aSVenki Rajagopalan 
832*b494511aSVenki Rajagopalan static int
eibnx_rb_ibt_init(eibnx_t * ss)833*b494511aSVenki Rajagopalan eibnx_rb_ibt_init(eibnx_t *ss)
834*b494511aSVenki Rajagopalan {
835*b494511aSVenki Rajagopalan 	eibnx_hca_t *hca;
836*b494511aSVenki Rajagopalan 	eibnx_hca_t *hca_next;
837*b494511aSVenki Rajagopalan 	eibnx_hca_t *hca_list;
838*b494511aSVenki Rajagopalan 	ibt_status_t	ret;
839*b494511aSVenki Rajagopalan 
840*b494511aSVenki Rajagopalan 	/*
841*b494511aSVenki Rajagopalan 	 * Disable subnet notices callbacks
842*b494511aSVenki Rajagopalan 	 */
843*b494511aSVenki Rajagopalan 	ibt_register_subnet_notices(ss->nx_ibt_hdl, NULL, NULL);
844*b494511aSVenki Rajagopalan 
845*b494511aSVenki Rajagopalan 	/*
846*b494511aSVenki Rajagopalan 	 * Remove the hca list from the state structure
847*b494511aSVenki Rajagopalan 	 */
848*b494511aSVenki Rajagopalan 	mutex_enter(&ss->nx_lock);
849*b494511aSVenki Rajagopalan 	hca_list = ss->nx_hca;
850*b494511aSVenki Rajagopalan 	ss->nx_hca = NULL;
851*b494511aSVenki Rajagopalan 	mutex_exit(&ss->nx_lock);
852*b494511aSVenki Rajagopalan 
853*b494511aSVenki Rajagopalan 	/*
854*b494511aSVenki Rajagopalan 	 * For each HCA in the list, free up the portinfo/port structs,
855*b494511aSVenki Rajagopalan 	 * free the pd, close the hca handle and release the hca struct.
856*b494511aSVenki Rajagopalan 	 * If something goes wrong, try to put back whatever good remains
857*b494511aSVenki Rajagopalan 	 * back on the hca list and return failure.
858*b494511aSVenki Rajagopalan 	 */
859*b494511aSVenki Rajagopalan 	for (hca = hca_list; hca; hca = hca_next) {
860*b494511aSVenki Rajagopalan 		hca_next = hca->hc_next;
861*b494511aSVenki Rajagopalan 		if (eibnx_cleanup_hca(hca) != ENX_E_SUCCESS) {
862*b494511aSVenki Rajagopalan 			mutex_enter(&ss->nx_lock);
863*b494511aSVenki Rajagopalan 			ss->nx_hca = hca_next;
864*b494511aSVenki Rajagopalan 			mutex_exit(&ss->nx_lock);
865*b494511aSVenki Rajagopalan 			return (ENX_E_FAILURE);
866*b494511aSVenki Rajagopalan 		}
867*b494511aSVenki Rajagopalan 	}
868*b494511aSVenki Rajagopalan 
869*b494511aSVenki Rajagopalan 	if ((ret = ibt_detach(ss->nx_ibt_hdl)) != IBT_SUCCESS) {
870*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_detach(ibt_hdl=0x%llx) "
871*b494511aSVenki Rajagopalan 		    "failed, ret=%d", ss->nx_ibt_hdl, ret);
872*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
873*b494511aSVenki Rajagopalan 	}
874*b494511aSVenki Rajagopalan 	ss->nx_ibt_hdl = NULL;
875*b494511aSVenki Rajagopalan 
876*b494511aSVenki Rajagopalan 	eibnx_rb_state_init();
877*b494511aSVenki Rajagopalan 
878*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
879*b494511aSVenki Rajagopalan }
880*b494511aSVenki Rajagopalan 
881*b494511aSVenki Rajagopalan static void
eibnx_rb_state_init(void)882*b494511aSVenki Rajagopalan eibnx_rb_state_init(void)
883*b494511aSVenki Rajagopalan {
884*b494511aSVenki Rajagopalan 	eibnx_t *ss = enx_global_ss;
885*b494511aSVenki Rajagopalan 	kt_did_t thr_id;
886*b494511aSVenki Rajagopalan 
887*b494511aSVenki Rajagopalan 	/*
888*b494511aSVenki Rajagopalan 	 * Ask the eoib node creation thread to die and wait for
889*b494511aSVenki Rajagopalan 	 * it to happen
890*b494511aSVenki Rajagopalan 	 */
891*b494511aSVenki Rajagopalan 	mutex_enter(&ss->nx_nodeq_lock);
892*b494511aSVenki Rajagopalan 
893*b494511aSVenki Rajagopalan 	thr_id = ss->nx_nodeq_kt_did;
894*b494511aSVenki Rajagopalan 	ss->nx_nodeq_thr_die = 1;
895*b494511aSVenki Rajagopalan 	ss->nx_nodeq_kt_did = 0;
896*b494511aSVenki Rajagopalan 
897*b494511aSVenki Rajagopalan 	cv_signal(&ss->nx_nodeq_cv);
898*b494511aSVenki Rajagopalan 	mutex_exit(&ss->nx_nodeq_lock);
899*b494511aSVenki Rajagopalan 
900*b494511aSVenki Rajagopalan 	if (thr_id) {
901*b494511aSVenki Rajagopalan 		thread_join(thr_id);
902*b494511aSVenki Rajagopalan 	}
903*b494511aSVenki Rajagopalan 
904*b494511aSVenki Rajagopalan 	cv_destroy(&ss->nx_busop_cv);
905*b494511aSVenki Rajagopalan 	mutex_destroy(&ss->nx_busop_lock);
906*b494511aSVenki Rajagopalan 	cv_destroy(&ss->nx_nodeq_cv);
907*b494511aSVenki Rajagopalan 	mutex_destroy(&ss->nx_nodeq_lock);
908*b494511aSVenki Rajagopalan 	mutex_destroy(&ss->nx_lock);
909*b494511aSVenki Rajagopalan }
910*b494511aSVenki Rajagopalan 
911*b494511aSVenki Rajagopalan void
eibnx_rb_find_mgroups(eibnx_thr_info_t * info)912*b494511aSVenki Rajagopalan eibnx_rb_find_mgroups(eibnx_thr_info_t *info)
913*b494511aSVenki Rajagopalan {
914*b494511aSVenki Rajagopalan 	mutex_enter(&info->ti_mcg_lock);
915*b494511aSVenki Rajagopalan 	if ((info->ti_mcg_status & ENX_MCGS_FOUND) == ENX_MCGS_FOUND) {
916*b494511aSVenki Rajagopalan 		if (info->ti_advertise_mcg) {
917*b494511aSVenki Rajagopalan 			ibt_free_mcg_info(info->ti_advertise_mcg, 1);
918*b494511aSVenki Rajagopalan 			info->ti_advertise_mcg = NULL;
919*b494511aSVenki Rajagopalan 		}
920*b494511aSVenki Rajagopalan 		if (info->ti_solicit_mcg) {
921*b494511aSVenki Rajagopalan 			ibt_free_mcg_info(info->ti_solicit_mcg, 1);
922*b494511aSVenki Rajagopalan 			info->ti_solicit_mcg = NULL;
923*b494511aSVenki Rajagopalan 		}
924*b494511aSVenki Rajagopalan 		info->ti_mcg_status &= (~ENX_MCGS_FOUND);
925*b494511aSVenki Rajagopalan 	}
926*b494511aSVenki Rajagopalan 	mutex_exit(&info->ti_mcg_lock);
927*b494511aSVenki Rajagopalan }
928*b494511aSVenki Rajagopalan 
929*b494511aSVenki Rajagopalan void
eibnx_rb_setup_cq(eibnx_thr_info_t * info)930*b494511aSVenki Rajagopalan eibnx_rb_setup_cq(eibnx_thr_info_t *info)
931*b494511aSVenki Rajagopalan {
932*b494511aSVenki Rajagopalan 	ibt_status_t ret;
933*b494511aSVenki Rajagopalan 
934*b494511aSVenki Rajagopalan 	if (info->ti_wc && info->ti_cq_sz)
935*b494511aSVenki Rajagopalan 		kmem_free(info->ti_wc, sizeof (ibt_wc_t) * info->ti_cq_sz);
936*b494511aSVenki Rajagopalan 
937*b494511aSVenki Rajagopalan 	info->ti_cq_sz = 0;
938*b494511aSVenki Rajagopalan 	info->ti_wc = NULL;
939*b494511aSVenki Rajagopalan 
940*b494511aSVenki Rajagopalan 	if (info->ti_cq_hdl) {
941*b494511aSVenki Rajagopalan 		ret = ibt_free_cq(info->ti_cq_hdl);
942*b494511aSVenki Rajagopalan 		if (ret != IBT_SUCCESS) {
943*b494511aSVenki Rajagopalan 			ENX_DPRINTF_WARN("ibt_free_cq(cq_hdl=0x%llx) "
944*b494511aSVenki Rajagopalan 			    "failed, ret=%d", info->ti_cq_hdl, ret);
945*b494511aSVenki Rajagopalan 		}
946*b494511aSVenki Rajagopalan 		info->ti_cq_hdl = NULL;
947*b494511aSVenki Rajagopalan 	}
948*b494511aSVenki Rajagopalan }
949*b494511aSVenki Rajagopalan 
950*b494511aSVenki Rajagopalan void
eibnx_rb_setup_ud_channel(eibnx_thr_info_t * info)951*b494511aSVenki Rajagopalan eibnx_rb_setup_ud_channel(eibnx_thr_info_t *info)
952*b494511aSVenki Rajagopalan {
953*b494511aSVenki Rajagopalan 	ibt_status_t ret;
954*b494511aSVenki Rajagopalan 
955*b494511aSVenki Rajagopalan 	if ((ret = ibt_free_channel(info->ti_chan)) != IBT_SUCCESS) {
956*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_free_channel(chan=0x%llx) "
957*b494511aSVenki Rajagopalan 		    "failed, ret=%d", info->ti_chan, ret);
958*b494511aSVenki Rajagopalan 	}
959*b494511aSVenki Rajagopalan 	info->ti_chan = NULL;
960*b494511aSVenki Rajagopalan 	info->ti_qpn = 0;
961*b494511aSVenki Rajagopalan }
962*b494511aSVenki Rajagopalan 
963*b494511aSVenki Rajagopalan static void
eibnx_rb_setup_txbufs(eibnx_thr_info_t * info)964*b494511aSVenki Rajagopalan eibnx_rb_setup_txbufs(eibnx_thr_info_t *info)
965*b494511aSVenki Rajagopalan {
966*b494511aSVenki Rajagopalan 	eibnx_tx_t *snd_p = &info->ti_snd;
967*b494511aSVenki Rajagopalan 	eibnx_wqe_t *swqe;
968*b494511aSVenki Rajagopalan 	ibt_status_t ret;
969*b494511aSVenki Rajagopalan 	int i;
970*b494511aSVenki Rajagopalan 	uint_t mtu = (128 << info->ti_pi->p_mtu);
971*b494511aSVenki Rajagopalan 
972*b494511aSVenki Rajagopalan 	/*
973*b494511aSVenki Rajagopalan 	 * Release any UD destination handle we may have allocated.  Note that
974*b494511aSVenki Rajagopalan 	 * the per swqe lock would've been initialized only if we were able to
975*b494511aSVenki Rajagopalan 	 * allocate the UD dest handle.
976*b494511aSVenki Rajagopalan 	 */
977*b494511aSVenki Rajagopalan 	for (i = 0; i < ENX_NUM_SWQE; i++) {
978*b494511aSVenki Rajagopalan 		swqe = &snd_p->tx_wqe[i];
979*b494511aSVenki Rajagopalan 
980*b494511aSVenki Rajagopalan 		if (swqe->qe_wr.send.wr.ud.udwr_dest) {
981*b494511aSVenki Rajagopalan 			mutex_destroy(&swqe->qe_lock);
982*b494511aSVenki Rajagopalan 
983*b494511aSVenki Rajagopalan 			ret =
984*b494511aSVenki Rajagopalan 			    ibt_free_ud_dest(swqe->qe_wr.send.wr.ud.udwr_dest);
985*b494511aSVenki Rajagopalan 			if (ret != IBT_SUCCESS) {
986*b494511aSVenki Rajagopalan 				ENX_DPRINTF_WARN("ibt_free_ud_dest(dest=0x%llx)"
987*b494511aSVenki Rajagopalan 				    " failed, ret=%d",
988*b494511aSVenki Rajagopalan 				    swqe->qe_wr.send.wr.ud.udwr_dest, ret);
989*b494511aSVenki Rajagopalan 			}
990*b494511aSVenki Rajagopalan 		}
991*b494511aSVenki Rajagopalan 	}
992*b494511aSVenki Rajagopalan 
993*b494511aSVenki Rajagopalan 	/*
994*b494511aSVenki Rajagopalan 	 * Clear all the workq entries
995*b494511aSVenki Rajagopalan 	 */
996*b494511aSVenki Rajagopalan 	bzero(snd_p->tx_wqe, sizeof (eibnx_wqe_t) * ENX_NUM_SWQE);
997*b494511aSVenki Rajagopalan 
998*b494511aSVenki Rajagopalan 	/*
999*b494511aSVenki Rajagopalan 	 * Clear Lkey and deregister any memory region we may have
1000*b494511aSVenki Rajagopalan 	 * registered earlier
1001*b494511aSVenki Rajagopalan 	 */
1002*b494511aSVenki Rajagopalan 	snd_p->tx_lkey = 0;
1003*b494511aSVenki Rajagopalan 	if (snd_p->tx_mr) {
1004*b494511aSVenki Rajagopalan 		if ((ret = ibt_deregister_mr(info->ti_hca,
1005*b494511aSVenki Rajagopalan 		    snd_p->tx_mr)) != IBT_SUCCESS) {
1006*b494511aSVenki Rajagopalan 			ENX_DPRINTF_WARN("ibt_deregister_TXmr(hca_hdl=0x%llx,"
1007*b494511aSVenki Rajagopalan 			    "mr=0x%llx) failed, ret=%d", info->ti_hca,
1008*b494511aSVenki Rajagopalan 			    snd_p->tx_mr, ret);
1009*b494511aSVenki Rajagopalan 		}
1010*b494511aSVenki Rajagopalan 		snd_p->tx_mr = NULL;
1011*b494511aSVenki Rajagopalan 	}
1012*b494511aSVenki Rajagopalan 
1013*b494511aSVenki Rajagopalan 	/*
1014*b494511aSVenki Rajagopalan 	 * Release any memory allocated for the tx bufs
1015*b494511aSVenki Rajagopalan 	 */
1016*b494511aSVenki Rajagopalan 	if (snd_p->tx_vaddr) {
1017*b494511aSVenki Rajagopalan 		kmem_free((void *)(uintptr_t)(snd_p->tx_vaddr),
1018*b494511aSVenki Rajagopalan 		    ENX_NUM_SWQE * mtu);
1019*b494511aSVenki Rajagopalan 		snd_p->tx_vaddr = 0;
1020*b494511aSVenki Rajagopalan 	}
1021*b494511aSVenki Rajagopalan 
1022*b494511aSVenki Rajagopalan }
1023*b494511aSVenki Rajagopalan 
1024*b494511aSVenki Rajagopalan static void
eibnx_rb_setup_rxbufs(eibnx_thr_info_t * info)1025*b494511aSVenki Rajagopalan eibnx_rb_setup_rxbufs(eibnx_thr_info_t *info)
1026*b494511aSVenki Rajagopalan {
1027*b494511aSVenki Rajagopalan 	eibnx_rx_t *rcv_p = &info->ti_rcv;
1028*b494511aSVenki Rajagopalan 	eibnx_wqe_t *rwqe;
1029*b494511aSVenki Rajagopalan 	ibt_status_t ret;
1030*b494511aSVenki Rajagopalan 	uint_t mtu = (128 << info->ti_pi->p_mtu);
1031*b494511aSVenki Rajagopalan 	int i;
1032*b494511aSVenki Rajagopalan 
1033*b494511aSVenki Rajagopalan 	for (i = 0; i < ENX_NUM_RWQE; i++) {
1034*b494511aSVenki Rajagopalan 		rwqe = &rcv_p->rx_wqe[i];
1035*b494511aSVenki Rajagopalan 		mutex_destroy(&rwqe->qe_lock);
1036*b494511aSVenki Rajagopalan 	}
1037*b494511aSVenki Rajagopalan 	bzero(rcv_p->rx_wqe, sizeof (eibnx_wqe_t) * ENX_NUM_RWQE);
1038*b494511aSVenki Rajagopalan 
1039*b494511aSVenki Rajagopalan 	rcv_p->rx_lkey = 0;
1040*b494511aSVenki Rajagopalan 
1041*b494511aSVenki Rajagopalan 	if ((ret = ibt_deregister_mr(info->ti_hca,
1042*b494511aSVenki Rajagopalan 	    rcv_p->rx_mr)) != IBT_SUCCESS) {
1043*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_deregister_RXmr(hca_hdl=0x%llx,"
1044*b494511aSVenki Rajagopalan 		    "mr=0x%llx) failed, ret=%d", info->ti_hca,
1045*b494511aSVenki Rajagopalan 		    rcv_p->rx_mr, ret);
1046*b494511aSVenki Rajagopalan 	}
1047*b494511aSVenki Rajagopalan 	rcv_p->rx_mr = NULL;
1048*b494511aSVenki Rajagopalan 
1049*b494511aSVenki Rajagopalan 	kmem_free((void *)(uintptr_t)(rcv_p->rx_vaddr),
1050*b494511aSVenki Rajagopalan 	    ENX_NUM_RWQE * (mtu + ENX_GRH_SZ));
1051*b494511aSVenki Rajagopalan 	rcv_p->rx_vaddr = 0;
1052*b494511aSVenki Rajagopalan }
1053*b494511aSVenki Rajagopalan 
1054*b494511aSVenki Rajagopalan void
eibnx_rb_setup_bufs(eibnx_thr_info_t * info)1055*b494511aSVenki Rajagopalan eibnx_rb_setup_bufs(eibnx_thr_info_t *info)
1056*b494511aSVenki Rajagopalan {
1057*b494511aSVenki Rajagopalan 	ibt_status_t ret;
1058*b494511aSVenki Rajagopalan 
1059*b494511aSVenki Rajagopalan 	if ((ret = ibt_flush_channel(info->ti_chan)) != IBT_SUCCESS) {
1060*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_flush_channel(chan_hdl=0x%llx) "
1061*b494511aSVenki Rajagopalan 		    "failed, ret=%d", info->ti_chan, ret);
1062*b494511aSVenki Rajagopalan 	}
1063*b494511aSVenki Rajagopalan 
1064*b494511aSVenki Rajagopalan 	eibnx_rb_setup_rxbufs(info);
1065*b494511aSVenki Rajagopalan 
1066*b494511aSVenki Rajagopalan 	eibnx_rb_setup_txbufs(info);
1067*b494511aSVenki Rajagopalan }
1068*b494511aSVenki Rajagopalan 
1069*b494511aSVenki Rajagopalan void
eibnx_rb_setup_cq_handler(eibnx_thr_info_t * info)1070*b494511aSVenki Rajagopalan eibnx_rb_setup_cq_handler(eibnx_thr_info_t *info)
1071*b494511aSVenki Rajagopalan {
1072*b494511aSVenki Rajagopalan 	ibt_set_cq_handler(info->ti_cq_hdl, NULL, NULL);
1073*b494511aSVenki Rajagopalan 
1074*b494511aSVenki Rajagopalan 	if (info->ti_softint_hdl) {
1075*b494511aSVenki Rajagopalan 		(void) ddi_intr_remove_softint(info->ti_softint_hdl);
1076*b494511aSVenki Rajagopalan 		info->ti_softint_hdl = NULL;
1077*b494511aSVenki Rajagopalan 	}
1078*b494511aSVenki Rajagopalan }
1079*b494511aSVenki Rajagopalan 
1080*b494511aSVenki Rajagopalan static void
eibnx_rb_join_solicit_mcg(eibnx_thr_info_t * info)1081*b494511aSVenki Rajagopalan eibnx_rb_join_solicit_mcg(eibnx_thr_info_t *info)
1082*b494511aSVenki Rajagopalan {
1083*b494511aSVenki Rajagopalan 	ib_gid_t rgid = info->ti_pi->p_sgid_tbl[0];
1084*b494511aSVenki Rajagopalan 	ib_gid_t rsvd_gid;
1085*b494511aSVenki Rajagopalan 	ibt_status_t ret;
1086*b494511aSVenki Rajagopalan 
1087*b494511aSVenki Rajagopalan 	rsvd_gid.gid_prefix = 0;
1088*b494511aSVenki Rajagopalan 	rsvd_gid.gid_guid = 0;
1089*b494511aSVenki Rajagopalan 
1090*b494511aSVenki Rajagopalan 	ret = ibt_leave_mcg(rgid, enx_solicit_mgid,
1091*b494511aSVenki Rajagopalan 	    rsvd_gid, IB_MC_JSTATE_FULL);
1092*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
1093*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_leave_mcg(slct_mgid=%llx.%llx) "
1094*b494511aSVenki Rajagopalan 		    "failed, ret=%d", enx_solicit_mgid.gid_prefix,
1095*b494511aSVenki Rajagopalan 		    enx_solicit_mgid.gid_guid, ret);
1096*b494511aSVenki Rajagopalan 	}
1097*b494511aSVenki Rajagopalan }
1098*b494511aSVenki Rajagopalan 
1099*b494511aSVenki Rajagopalan static void
eibnx_rb_join_advertise_mcg(eibnx_thr_info_t * info)1100*b494511aSVenki Rajagopalan eibnx_rb_join_advertise_mcg(eibnx_thr_info_t *info)
1101*b494511aSVenki Rajagopalan {
1102*b494511aSVenki Rajagopalan 	ib_gid_t rgid = info->ti_pi->p_sgid_tbl[0];
1103*b494511aSVenki Rajagopalan 	ib_gid_t rsvd_gid;
1104*b494511aSVenki Rajagopalan 	ibt_status_t ret;
1105*b494511aSVenki Rajagopalan 
1106*b494511aSVenki Rajagopalan 	ret = ibt_detach_mcg(info->ti_chan, info->ti_advertise_mcg);
1107*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
1108*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_detach_mcg(chan_hdl=0x%llx, "
1109*b494511aSVenki Rajagopalan 		    "advt_mcg=0x%llx) failed, ret=%d",
1110*b494511aSVenki Rajagopalan 		    info->ti_chan, info->ti_advertise_mcg, ret);
1111*b494511aSVenki Rajagopalan 	}
1112*b494511aSVenki Rajagopalan 
1113*b494511aSVenki Rajagopalan 	rsvd_gid.gid_prefix = 0;
1114*b494511aSVenki Rajagopalan 	rsvd_gid.gid_guid = 0;
1115*b494511aSVenki Rajagopalan 
1116*b494511aSVenki Rajagopalan 	ret = ibt_leave_mcg(rgid, enx_advertise_mgid,
1117*b494511aSVenki Rajagopalan 	    rsvd_gid, IB_MC_JSTATE_FULL);
1118*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
1119*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_leave_mcg(advt_mgid=%llx.%llx) "
1120*b494511aSVenki Rajagopalan 		    "failed, ret=%d", enx_advertise_mgid.gid_prefix,
1121*b494511aSVenki Rajagopalan 		    enx_advertise_mgid.gid_guid, ret);
1122*b494511aSVenki Rajagopalan 	}
1123*b494511aSVenki Rajagopalan }
1124*b494511aSVenki Rajagopalan 
1125*b494511aSVenki Rajagopalan void
eibnx_rb_join_mcgs(eibnx_thr_info_t * info)1126*b494511aSVenki Rajagopalan eibnx_rb_join_mcgs(eibnx_thr_info_t *info)
1127*b494511aSVenki Rajagopalan {
1128*b494511aSVenki Rajagopalan 	mutex_enter(&info->ti_mcg_lock);
1129*b494511aSVenki Rajagopalan 	if ((info->ti_mcg_status & ENX_MCGS_JOINED) == ENX_MCGS_JOINED) {
1130*b494511aSVenki Rajagopalan 		eibnx_rb_join_solicit_mcg(info);
1131*b494511aSVenki Rajagopalan 		eibnx_rb_join_advertise_mcg(info);
1132*b494511aSVenki Rajagopalan 
1133*b494511aSVenki Rajagopalan 		info->ti_mcg_status &= (~ENX_MCGS_JOINED);
1134*b494511aSVenki Rajagopalan 	}
1135*b494511aSVenki Rajagopalan 	mutex_exit(&info->ti_mcg_lock);
1136*b494511aSVenki Rajagopalan }
1137*b494511aSVenki Rajagopalan 
1138*b494511aSVenki Rajagopalan eibnx_hca_t *
eibnx_prepare_hca(ib_guid_t hca_guid)1139*b494511aSVenki Rajagopalan eibnx_prepare_hca(ib_guid_t hca_guid)
1140*b494511aSVenki Rajagopalan {
1141*b494511aSVenki Rajagopalan 	eibnx_t *ss = enx_global_ss;
1142*b494511aSVenki Rajagopalan 	eibnx_hca_t *hca;
1143*b494511aSVenki Rajagopalan 	eibnx_port_t *port;
1144*b494511aSVenki Rajagopalan 	eibnx_port_t *port_tail;
1145*b494511aSVenki Rajagopalan 	ibt_hca_hdl_t hca_hdl;
1146*b494511aSVenki Rajagopalan 	ibt_pd_hdl_t pd_hdl;
1147*b494511aSVenki Rajagopalan 	ibt_hca_portinfo_t *pi;
1148*b494511aSVenki Rajagopalan 	uint_t num_pi;
1149*b494511aSVenki Rajagopalan 	uint_t size_pi;
1150*b494511aSVenki Rajagopalan 	ibt_hca_attr_t hca_attr;
1151*b494511aSVenki Rajagopalan 	ibt_status_t ret;
1152*b494511aSVenki Rajagopalan 	int i;
1153*b494511aSVenki Rajagopalan 
1154*b494511aSVenki Rajagopalan 	ret = ibt_open_hca(ss->nx_ibt_hdl, hca_guid, &hca_hdl);
1155*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
1156*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_open_hca(hca_guid=0x%llx) "
1157*b494511aSVenki Rajagopalan 		    "failed, ret=%d", hca_guid, ret);
1158*b494511aSVenki Rajagopalan 		return (NULL);
1159*b494511aSVenki Rajagopalan 	}
1160*b494511aSVenki Rajagopalan 
1161*b494511aSVenki Rajagopalan 	bzero(&hca_attr, sizeof (ibt_hca_attr_t));
1162*b494511aSVenki Rajagopalan 	if ((ret = ibt_query_hca(hca_hdl, &hca_attr)) != IBT_SUCCESS) {
1163*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_query_hca(hca_hdl=0x%llx, "
1164*b494511aSVenki Rajagopalan 		    "hca_guid=0x%llx) failed, ret=%d",
1165*b494511aSVenki Rajagopalan 		    hca_hdl, hca_guid, ret);
1166*b494511aSVenki Rajagopalan 
1167*b494511aSVenki Rajagopalan 		if ((ret = ibt_close_hca(hca_hdl)) != IBT_SUCCESS) {
1168*b494511aSVenki Rajagopalan 			ENX_DPRINTF_WARN("ibt_close_hca(hca_hdl=0x%llx) "
1169*b494511aSVenki Rajagopalan 			    "failed, ret=%d", hca_hdl, ret);
1170*b494511aSVenki Rajagopalan 		}
1171*b494511aSVenki Rajagopalan 		return (NULL);
1172*b494511aSVenki Rajagopalan 	}
1173*b494511aSVenki Rajagopalan 
1174*b494511aSVenki Rajagopalan 	ret = ibt_alloc_pd(hca_hdl, IBT_PD_NO_FLAGS, &pd_hdl);
1175*b494511aSVenki Rajagopalan 	if (ret != IBT_SUCCESS) {
1176*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("ibt_alloc_pd(hca_hdl=0x%llx, "
1177*b494511aSVenki Rajagopalan 		    "hca_guid=0x%llx) failed, ret=%d",
1178*b494511aSVenki Rajagopalan 		    hca_hdl, hca_guid, ret);
1179*b494511aSVenki Rajagopalan 
1180*b494511aSVenki Rajagopalan 		if ((ret = ibt_close_hca(hca_hdl)) != IBT_SUCCESS) {
1181*b494511aSVenki Rajagopalan 			ENX_DPRINTF_WARN("ibt_close_hca(hca_hdl=0x%llx) "
1182*b494511aSVenki Rajagopalan 			    "failed, ret=%d", hca_hdl, ret);
1183*b494511aSVenki Rajagopalan 		}
1184*b494511aSVenki Rajagopalan 		return (NULL);
1185*b494511aSVenki Rajagopalan 	}
1186*b494511aSVenki Rajagopalan 
1187*b494511aSVenki Rajagopalan 	/*
1188*b494511aSVenki Rajagopalan 	 * We have all the information we want about this hca, create
1189*b494511aSVenki Rajagopalan 	 * a new struct and return it.
1190*b494511aSVenki Rajagopalan 	 */
1191*b494511aSVenki Rajagopalan 	hca = kmem_zalloc(sizeof (eibnx_hca_t), KM_SLEEP);
1192*b494511aSVenki Rajagopalan 	hca->hc_next = NULL;
1193*b494511aSVenki Rajagopalan 	hca->hc_guid = hca_guid;
1194*b494511aSVenki Rajagopalan 	hca->hc_hdl = hca_hdl;
1195*b494511aSVenki Rajagopalan 	hca->hc_pd = pd_hdl;
1196*b494511aSVenki Rajagopalan 	hca->hc_port = port_tail = NULL;
1197*b494511aSVenki Rajagopalan 
1198*b494511aSVenki Rajagopalan 	for (i = 0; i < hca_attr.hca_nports; i++) {
1199*b494511aSVenki Rajagopalan 		ret = ibt_query_hca_ports(hca_hdl, i + 1, &pi,
1200*b494511aSVenki Rajagopalan 		    &num_pi, &size_pi);
1201*b494511aSVenki Rajagopalan 		if (ret != IBT_SUCCESS) {
1202*b494511aSVenki Rajagopalan 			ENX_DPRINTF_WARN("ibt_query_hca_ports(hca_hdl=0x%llx, "
1203*b494511aSVenki Rajagopalan 			    "port=0x%x) failed, ret=%d", hca_hdl, i + 1, ret);
1204*b494511aSVenki Rajagopalan 		} else {
1205*b494511aSVenki Rajagopalan 			port = kmem_zalloc(sizeof (eibnx_port_t), KM_SLEEP);
1206*b494511aSVenki Rajagopalan 			port->po_next = NULL;
1207*b494511aSVenki Rajagopalan 			port->po_pi = pi;
1208*b494511aSVenki Rajagopalan 			port->po_pi_size = size_pi;
1209*b494511aSVenki Rajagopalan 
1210*b494511aSVenki Rajagopalan 			if (port_tail) {
1211*b494511aSVenki Rajagopalan 				port_tail->po_next = port;
1212*b494511aSVenki Rajagopalan 			} else {
1213*b494511aSVenki Rajagopalan 				hca->hc_port = port;
1214*b494511aSVenki Rajagopalan 			}
1215*b494511aSVenki Rajagopalan 			port_tail = port;
1216*b494511aSVenki Rajagopalan 		}
1217*b494511aSVenki Rajagopalan 	}
1218*b494511aSVenki Rajagopalan 
1219*b494511aSVenki Rajagopalan 	/*
1220*b494511aSVenki Rajagopalan 	 * If we couldn't query about any ports on the HCA, return failure
1221*b494511aSVenki Rajagopalan 	 */
1222*b494511aSVenki Rajagopalan 	if (hca->hc_port == NULL) {
1223*b494511aSVenki Rajagopalan 		ENX_DPRINTF_ERR("all hca port queries failed for "
1224*b494511aSVenki Rajagopalan 		    "hca_guid=0x%llx", hca_guid);
1225*b494511aSVenki Rajagopalan 		(void) eibnx_cleanup_hca(hca);
1226*b494511aSVenki Rajagopalan 		return (NULL);
1227*b494511aSVenki Rajagopalan 	}
1228*b494511aSVenki Rajagopalan 
1229*b494511aSVenki Rajagopalan 	return (hca);
1230*b494511aSVenki Rajagopalan }
1231*b494511aSVenki Rajagopalan 
1232*b494511aSVenki Rajagopalan int
eibnx_cleanup_hca(eibnx_hca_t * hca)1233*b494511aSVenki Rajagopalan eibnx_cleanup_hca(eibnx_hca_t *hca)
1234*b494511aSVenki Rajagopalan {
1235*b494511aSVenki Rajagopalan 	eibnx_port_t *port;
1236*b494511aSVenki Rajagopalan 	eibnx_port_t *port_next;
1237*b494511aSVenki Rajagopalan 	ibt_status_t ret;
1238*b494511aSVenki Rajagopalan 
1239*b494511aSVenki Rajagopalan 	for (port = hca->hc_port; port; port = port_next) {
1240*b494511aSVenki Rajagopalan 		port_next = port->po_next;
1241*b494511aSVenki Rajagopalan 
1242*b494511aSVenki Rajagopalan 		ibt_free_portinfo(port->po_pi, port->po_pi_size);
1243*b494511aSVenki Rajagopalan 		kmem_free(port, sizeof (eibnx_port_t));
1244*b494511aSVenki Rajagopalan 	}
1245*b494511aSVenki Rajagopalan 
1246*b494511aSVenki Rajagopalan 	if ((ret = ibt_free_pd(hca->hc_hdl, hca->hc_pd)) != IBT_SUCCESS) {
1247*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_free_pd(hca_hdl=0x%lx, pd_hd=0x%lx) "
1248*b494511aSVenki Rajagopalan 		    "failed, ret=%d", hca->hc_hdl, hca->hc_pd, ret);
1249*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
1250*b494511aSVenki Rajagopalan 	}
1251*b494511aSVenki Rajagopalan 
1252*b494511aSVenki Rajagopalan 	if ((ret = ibt_close_hca(hca->hc_hdl)) != IBT_SUCCESS) {
1253*b494511aSVenki Rajagopalan 		ENX_DPRINTF_WARN("ibt_close_hca(hca_hdl=0x%lx) failed, "
1254*b494511aSVenki Rajagopalan 		    "ret=%d", hca->hc_hdl, ret);
1255*b494511aSVenki Rajagopalan 		return (ENX_E_FAILURE);
1256*b494511aSVenki Rajagopalan 	}
1257*b494511aSVenki Rajagopalan 
1258*b494511aSVenki Rajagopalan 	kmem_free(hca, sizeof (eibnx_hca_t));
1259*b494511aSVenki Rajagopalan 
1260*b494511aSVenki Rajagopalan 	return (ENX_E_SUCCESS);
1261*b494511aSVenki Rajagopalan }
1262