xref: /illumos-gate/usr/src/uts/common/io/ib/mgt/ibdm/ibdm.c (revision fc8ae2ec)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
26  */
27 
28 /*
29  * ibdm.c
30  *
31  * This file contains the InfiniBand Device Manager (IBDM) support functions.
32  * The IB nexus driver is the only client of the IBDM module.
33  *
34  * IBDM registers with IBTF for HCA arrival/removal notification.
35  * IBDM registers with SA access to send DM MADs to discover the IOCs behind
36  * the IOUs.
37  *
38  * The IB nexus driver registers with IBDM to obtain information about the
39  * HCAs and IOCs (behind the IOUs) present on the IB fabric.
40  */
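/*
 * A minimal, hypothetical sketch of the callback the IB nexus driver
 * supplies to IBDM.  The callback type name (ibdm_events_t), the function
 * name and the cmn_err() reporting below are illustrative assumptions;
 * only the IBDM_EVENT_* names and the hca_guid argument convention are
 * taken from this file:
 *
 *	static void
 *	ibnex_dm_callback(void *arg, ibdm_events_t event)
 *	{
 *		ib_guid_t	hca_guid;
 *
 *		switch (event) {
 *		case IBDM_EVENT_HCA_ADDED:
 *		case IBDM_EVENT_HCA_REMOVED:
 *			hca_guid = *(ib_guid_t *)arg;
 *			cmn_err(CE_NOTE, "HCA 0x%llx changed",
 *			    (u_longlong_t)hca_guid);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */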
41 
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/taskq.h>
45 #include <sys/ib/mgt/ibdm/ibdm_impl.h>
46 #include <sys/ib/mgt/ibmf/ibmf_impl.h>
47 #include <sys/ib/ibtl/impl/ibtl_ibnex.h>
48 #include <sys/modctl.h>
49 
50 /* Function Prototype declarations */
51 static int	ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
52 static int	ibdm_fini(void);
53 static int	ibdm_init(void);
54 static int	ibdm_get_reachable_ports(ibdm_port_attr_t *,
55 			ibdm_hca_list_t *);
56 static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
57 static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
58 static boolean_t ibdm_is_cisco(ib_guid_t);
59 static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
60 static void	ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
61 static int	ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
62 static int	ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
63 static int	ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
64 static int	ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
65 static int	ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
66 		    ib_guid_t *, ib_guid_t *);
67 static int	ibdm_retry_command(ibdm_timeout_cb_args_t *);
68 static int	ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
69 static int	ibdm_verify_mad_status(ib_mad_hdr_t *);
70 static int	ibdm_handle_redirection(ibmf_msg_t *,
71 		    ibdm_dp_gidinfo_t *, int *);
72 static void	ibdm_wait_probe_completion(void);
73 static void	ibdm_sweep_fabric(int);
74 static void	ibdm_probe_gid_thread(void *);
75 static void	ibdm_wakeup_probe_gid_cv(void);
76 static void	ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
77 static int	ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
78 static void	ibdm_update_port_attr(ibdm_port_attr_t *);
79 static void	ibdm_handle_hca_attach(ib_guid_t);
80 static void	ibdm_handle_srventry_mad(ibmf_msg_t *,
81 		    ibdm_dp_gidinfo_t *, int *);
82 static void	ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
83 static void	ibdm_recv_incoming_mad(void *);
84 static void	ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
85 static void	ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *);
86 static void	ibdm_pkt_timeout_hdlr(void *arg);
87 static void	ibdm_initialize_port(ibdm_port_attr_t *);
88 static void	ibdm_update_port_pkeys(ibdm_port_attr_t *port);
89 static void	ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
90 static void	ibdm_probe_gid(ibdm_dp_gidinfo_t *);
91 static void	ibdm_alloc_send_buffers(ibmf_msg_t *);
92 static void	ibdm_free_send_buffers(ibmf_msg_t *);
93 static void	ibdm_handle_hca_detach(ib_guid_t);
94 static void	ibdm_handle_port_change_event(ibt_async_event_t *);
95 static int	ibdm_fini_port(ibdm_port_attr_t *);
96 static int	ibdm_uninit_hca(ibdm_hca_list_t *);
97 static void	ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *,
98 		    ibdm_dp_gidinfo_t *, int *);
99 static void	ibdm_handle_iounitinfo(ibmf_handle_t,
100 		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
101 static void	ibdm_handle_ioc_profile(ibmf_handle_t,
102 		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
103 static void	ibdm_event_hdlr(void *, ibt_hca_hdl_t,
104 		    ibt_async_code_t, ibt_async_event_t *);
105 static void	ibdm_handle_classportinfo(ibmf_handle_t,
106 		    ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
107 static void	ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *,
108 		    ibdm_dp_gidinfo_t *);
109 
110 static ibdm_hca_list_t		*ibdm_dup_hca_attr(ibdm_hca_list_t *);
111 static ibdm_ioc_info_t		*ibdm_dup_ioc_info(ibdm_ioc_info_t *,
112 				    ibdm_dp_gidinfo_t *gid_list);
113 static void			ibdm_probe_ioc(ib_guid_t, ib_guid_t, int);
114 static ibdm_ioc_info_t		*ibdm_is_ioc_present(ib_guid_t,
115 				    ibdm_dp_gidinfo_t *, int *);
116 static ibdm_port_attr_t		*ibdm_get_port_attr(ibt_async_event_t *,
117 				    ibdm_hca_list_t **);
118 static sa_node_record_t		*ibdm_get_node_records(ibmf_saa_handle_t,
119 				    size_t *, ib_guid_t);
120 static int			ibdm_get_node_record_by_port(ibmf_saa_handle_t,
121 				    ib_guid_t, sa_node_record_t **, size_t *);
122 static sa_portinfo_record_t	*ibdm_get_portinfo(ibmf_saa_handle_t, size_t *,
123 				    ib_lid_t);
124 static ibdm_dp_gidinfo_t	*ibdm_create_gid_info(ibdm_port_attr_t *,
125 				    ib_gid_t, ib_gid_t);
126 static ibdm_dp_gidinfo_t	*ibdm_find_gid(ib_guid_t, ib_guid_t);
127 static int	ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t);
128 static ibdm_ioc_info_t	*ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int);
129 static void	ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t,
130 		    ibmf_saa_event_details_t *, void *);
131 static void	ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *,
132     ibdm_dp_gidinfo_t *);
133 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *);
134 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *,
135     ibdm_dp_gidinfo_t *);
136 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *);
137 static void ibdm_free_gid_list(ibdm_gid_t *);
138 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid);
139 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *);
140 static void ibdm_saa_event_taskq(void *);
141 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *);
142 static void ibdm_get_next_port(ibdm_hca_list_t **,
143     ibdm_port_attr_t **, int);
144 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *,
145     ibdm_dp_gidinfo_t *);
146 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *,
147     ibdm_hca_list_t *);
148 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *);
149 static void ibdm_saa_handle_new_gid(void *);
150 static void ibdm_reset_all_dgids(ibmf_saa_handle_t);
151 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *);
152 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *);
153 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *);
154 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *);
155 static ibdm_ioc_info_t	*ibdm_handle_prev_iou();
156 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *,
157     int);
158 static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t,
159     ibdm_dp_gidinfo_t **);
160 
161 int	ibdm_dft_timeout	= IBDM_DFT_TIMEOUT;
162 int	ibdm_dft_retry_cnt	= IBDM_DFT_NRETRIES;
163 #ifdef DEBUG
164 int	ibdm_ignore_saa_event = 0;
165 #endif
166 int	ibdm_enumerate_iocs = 0;
167 
168 /* Modload support */
169 static struct modlmisc ibdm_modlmisc	= {
170 	&mod_miscops,
171 	"InfiniBand Device Manager"
172 };
173 
174 struct modlinkage ibdm_modlinkage = {
175 	MODREV_1,
176 	(void *)&ibdm_modlmisc,
177 	NULL
178 };
179 
180 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = {
181 	IBTI_V_CURR,
182 	IBT_DM,
183 	ibdm_event_hdlr,
184 	NULL,
185 	"ibdm"
186 };
187 
188 /* Global variables */
189 ibdm_t	ibdm;
190 int	ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
191 char	*ibdm_string = "ibdm";
192 
193 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
194     ibdm.ibdm_dp_gidlist_head))
195 
196 /*
197  * _init
198  *	Loadable module init, called before any other module.
199  *	Initialize mutex
200  *	Register with IBTF
201  */
202 int
203 _init(void)
204 {
205 	int		err;
206 
207 	IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);
208 
209 	if ((err = ibdm_init()) != IBDM_SUCCESS) {
210 		IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
211 		(void) ibdm_fini();
212 		return (DDI_FAILURE);
213 	}
214 
215 	if ((err = mod_install(&ibdm_modlinkage)) != 0) {
216 		IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
217 		(void) ibdm_fini();
218 	}
219 	return (err);
220 }
221 
222 
223 int
224 _fini(void)
225 {
226 	int err;
227 
228 	if ((err = ibdm_fini()) != IBDM_SUCCESS) {
229 		IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
230 		(void) ibdm_init();
231 		return (EBUSY);
232 	}
233 
234 	if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
235 		IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
236 		(void) ibdm_init();
237 	}
238 	return (err);
239 }
240 
241 
242 int
243 _info(struct modinfo *modinfop)
244 {
245 	return (mod_info(&ibdm_modlinkage, modinfop));
246 }
247 
248 
249 /*
250  * ibdm_init():
251  * 	Register with IBTF
252  *	Allocate memory for the HCAs
253  *	Allocate minor-nodes for the HCAs
254  */
255 static int
256 ibdm_init(void)
257 {
258 	int			i, hca_count;
259 	ib_guid_t		*hca_guids;
260 	ibt_status_t		status;
261 
262 	IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
263 	if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
264 		mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
265 		mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
266 		mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
267 		cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL);
268 		mutex_enter(&ibdm.ibdm_mutex);
269 		ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
270 	}
271 
272 	if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
273 		if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
274 		    (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
275 			IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
276 			    "failed %x", status);
277 			mutex_exit(&ibdm.ibdm_mutex);
278 			return (IBDM_FAILURE);
279 		}
280 
281 		ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
282 		mutex_exit(&ibdm.ibdm_mutex);
283 	}
284 
285 
286 	if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
287 		hca_count = ibt_get_hca_list(&hca_guids);
288 		IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
289 		for (i = 0; i < hca_count; i++)
290 			(void) ibdm_handle_hca_attach(hca_guids[i]);
291 		if (hca_count)
292 			ibt_free_hca_list(hca_guids, hca_count);
293 
294 		mutex_enter(&ibdm.ibdm_mutex);
295 		ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
296 		mutex_exit(&ibdm.ibdm_mutex);
297 	}
298 
299 	if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
300 		cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
301 		cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
302 		mutex_enter(&ibdm.ibdm_mutex);
303 		ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
304 		mutex_exit(&ibdm.ibdm_mutex);
305 	}
306 	return (IBDM_SUCCESS);
307 }
308 
309 
310 static int
311 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup)
312 {
313 	int			ii, k, niocs;
314 	size_t			size;
315 	ibdm_gid_t		*delete, *head;
316 	timeout_id_t		timeout_id;
317 	ibdm_ioc_info_t		*ioc;
318 	ibdm_iou_info_t		*gl_iou = *ioup;
319 
320 	ASSERT(mutex_owned(&gid_info->gl_mutex));
321 	if (gl_iou == NULL) {
322 		IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
323 		return (0);
324 	}
325 
326 	niocs = gl_iou->iou_info.iou_num_ctrl_slots;
327 	IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
328 	    gid_info, niocs);
329 
330 	for (ii = 0; ii < niocs; ii++) {
331 		ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii];
332 
333 		/* handle the case where an ioc_timeout_id is scheduled */
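		/*
		 * Drop gl_mutex across untimeout(): untimeout() may wait
		 * for an already-running timeout handler, and that handler
		 * may itself need gl_mutex.
		 */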
334 		if (ioc->ioc_timeout_id) {
335 			timeout_id = ioc->ioc_timeout_id;
336 			ioc->ioc_timeout_id = 0;
337 			mutex_exit(&gid_info->gl_mutex);
338 			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
339 			    "ioc_timeout_id = 0x%x", timeout_id);
340 			if (untimeout(timeout_id) == -1) {
341 				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
342 				    "untimeout ioc_timeout_id failed");
343 				mutex_enter(&gid_info->gl_mutex);
344 				return (-1);
345 			}
346 			mutex_enter(&gid_info->gl_mutex);
347 		}
348 
349 		/* handle the case where an ioc_dc_timeout_id is scheduled */
350 		if (ioc->ioc_dc_timeout_id) {
351 			timeout_id = ioc->ioc_dc_timeout_id;
352 			ioc->ioc_dc_timeout_id = 0;
353 			mutex_exit(&gid_info->gl_mutex);
354 			IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
355 			    "ioc_dc_timeout_id = 0x%x", timeout_id);
356 			if (untimeout(timeout_id) == -1) {
357 				IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
358 				    "untimeout ioc_dc_timeout_id failed");
359 				mutex_enter(&gid_info->gl_mutex);
360 				return (-1);
361 			}
362 			mutex_enter(&gid_info->gl_mutex);
363 		}
364 
365 		/* handle the case where serv[k].se_timeout_id is scheduled */
366 		for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
367 			if (ioc->ioc_serv[k].se_timeout_id) {
368 				timeout_id = ioc->ioc_serv[k].se_timeout_id;
369 				ioc->ioc_serv[k].se_timeout_id = 0;
370 				mutex_exit(&gid_info->gl_mutex);
371 				IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
372 				    "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
373 				    k, timeout_id);
374 				if (untimeout(timeout_id) == -1) {
375 					IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
376 					    " untimeout se_timeout_id failed");
377 					mutex_enter(&gid_info->gl_mutex);
378 					return (-1);
379 				}
380 				mutex_enter(&gid_info->gl_mutex);
381 			}
382 		}
383 
384 		/* delete GID list in IOC */
385 		head = ioc->ioc_gid_list;
386 		while (head) {
387 			IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
388 			    "Deleting gid_list struct %p", head);
389 			delete = head;
390 			head = head->gid_next;
391 			kmem_free(delete, sizeof (ibdm_gid_t));
392 		}
393 		ioc->ioc_gid_list = NULL;
394 
395 		/* delete ioc_serv */
396 		size = ioc->ioc_profile.ioc_service_entries *
397 		    sizeof (ibdm_srvents_info_t);
398 		if (ioc->ioc_serv && size) {
399 			kmem_free(ioc->ioc_serv, size);
400 			ioc->ioc_serv = NULL;
401 		}
402 	}
403 	/*
404 	 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information
405 	 * via the switch during the probe process.
406 	 */
407 	gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE;
408 
409 	IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
410 	size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
411 	kmem_free(gl_iou, size);
412 	*ioup = NULL;
413 	return (0);
414 }
415 
416 
417 /*
418  * ibdm_fini():
419  * 	Un-register with IBTF
420  *	Deallocate memory for the GID info
421  */
422 static int
423 ibdm_fini()
424 {
425 	int			ii;
426 	ibdm_hca_list_t		*hca_list, *temp;
427 	ibdm_dp_gidinfo_t	*gid_info, *tmp;
428 	ibdm_gid_t		*head, *delete;
429 
430 	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");
431 
432 	mutex_enter(&ibdm.ibdm_hl_mutex);
433 	if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
434 		if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
435 			IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
436 			mutex_exit(&ibdm.ibdm_hl_mutex);
437 			return (IBDM_FAILURE);
438 		}
439 		ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
440 		ibdm.ibdm_ibt_clnt_hdl = NULL;
441 	}
442 
443 	hca_list = ibdm.ibdm_hca_list_head;
444 	IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
445 	for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
446 		temp = hca_list;
447 		hca_list = hca_list->hl_next;
448 		IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
449 		if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
450 			IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
451 			    "uninit_hca %p failed", temp);
452 			mutex_exit(&ibdm.ibdm_hl_mutex);
453 			return (IBDM_FAILURE);
454 		}
455 	}
456 	mutex_exit(&ibdm.ibdm_hl_mutex);
457 
458 	mutex_enter(&ibdm.ibdm_mutex);
459 	if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
460 		ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;
461 
462 	gid_info = ibdm.ibdm_dp_gidlist_head;
463 	while (gid_info) {
464 		mutex_enter(&gid_info->gl_mutex);
465 		(void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
466 		mutex_exit(&gid_info->gl_mutex);
467 		ibdm_delete_glhca_list(gid_info);
468 
469 		tmp = gid_info;
470 		gid_info = gid_info->gl_next;
471 		mutex_destroy(&tmp->gl_mutex);
472 		head = tmp->gl_gid;
473 		while (head) {
474 			IBTF_DPRINTF_L4("ibdm",
475 			    "\tibdm_fini: Deleting gid structs");
476 			delete = head;
477 			head = head->gid_next;
478 			kmem_free(delete, sizeof (ibdm_gid_t));
479 		}
480 		kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
481 	}
482 	mutex_exit(&ibdm.ibdm_mutex);
483 
484 	if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
485 		ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
486 		mutex_destroy(&ibdm.ibdm_mutex);
487 		mutex_destroy(&ibdm.ibdm_hl_mutex);
488 		mutex_destroy(&ibdm.ibdm_ibnex_mutex);
489 		cv_destroy(&ibdm.ibdm_port_settle_cv);
490 	}
491 	if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
492 		ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
493 		cv_destroy(&ibdm.ibdm_probe_cv);
494 		cv_destroy(&ibdm.ibdm_busy_cv);
495 	}
496 	return (IBDM_SUCCESS);
497 }
498 
499 
500 /*
501  * ibdm_event_hdlr()
502  *
503  *	IBDM registers this asynchronous event handler at the time of
504  *	ibt_attach. IBDM handles the following async events; all other
505  *	events are simply ignored.
506  *	IBT_HCA_ATTACH_EVENT:
507  *		Retrieves the information about all the ports that are
508  *		present on this HCA,  allocates  the  port  attributes
509  *		structure  and calls IB  nexus  callback  routine with
510  *		the port attributes structure as an input argument.
511  *	IBT_HCA_DETACH_EVENT:
512  *		Retrieves the information about all the ports that are
513  *		present on  this HCA and  calls IB nexus callback with
514  *		port guid as an argument
515  *	IBT_EVENT_PORT_UP:
516  *		Register with IBMF and SA access
517  *		Setup IBMF receive callback routine
518  *	IBT_EVENT_PORT_DOWN:
519  *		Un-Register with IBMF and SA access
520  *		Teardown IBMF receive callback routine
521  */
522 /*ARGSUSED*/
523 static void
524 ibdm_event_hdlr(void *clnt_hdl,
525     ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
526 {
527 	ibdm_hca_list_t		*hca_list;
528 	ibdm_port_attr_t	*port;
529 	ibmf_saa_handle_t	port_sa_hdl;
530 
531 	IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);
532 
533 	switch (code) {
534 	case IBT_HCA_ATTACH_EVENT:	/* New HCA registered with IBTF */
535 		ibdm_handle_hca_attach(event->ev_hca_guid);
536 		break;
537 
538 	case IBT_HCA_DETACH_EVENT:	/* HCA unregistered with IBTF */
539 		ibdm_handle_hca_detach(event->ev_hca_guid);
540 		mutex_enter(&ibdm.ibdm_ibnex_mutex);
541 		if (ibdm.ibdm_ibnex_callback != NULL) {
542 			(*ibdm.ibdm_ibnex_callback)((void *)
543 			    &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
544 		}
545 		mutex_exit(&ibdm.ibdm_ibnex_mutex);
546 		break;
547 
548 	case IBT_EVENT_PORT_UP:
549 		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
550 		mutex_enter(&ibdm.ibdm_hl_mutex);
551 		port = ibdm_get_port_attr(event, &hca_list);
552 		if (port == NULL) {
553 			IBTF_DPRINTF_L2("ibdm",
554 			    "\tevent_hdlr: HCA not present");
555 			mutex_exit(&ibdm.ibdm_hl_mutex);
556 			break;
557 		}
558 		ibdm_initialize_port(port);
559 		hca_list->hl_nports_active++;
560 		cv_broadcast(&ibdm.ibdm_port_settle_cv);
561 		mutex_exit(&ibdm.ibdm_hl_mutex);
562 
563 		/* Inform IB nexus driver */
564 		mutex_enter(&ibdm.ibdm_ibnex_mutex);
565 		if (ibdm.ibdm_ibnex_callback != NULL) {
566 			(*ibdm.ibdm_ibnex_callback)((void *)
567 			    &event->ev_hca_guid, IBDM_EVENT_PORT_UP);
568 		}
569 		mutex_exit(&ibdm.ibdm_ibnex_mutex);
570 		break;
571 
572 	case IBT_ERROR_PORT_DOWN:
573 		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
574 		mutex_enter(&ibdm.ibdm_hl_mutex);
575 		port = ibdm_get_port_attr(event, &hca_list);
576 		if (port == NULL) {
577 			IBTF_DPRINTF_L2("ibdm",
578 			    "\tevent_hdlr: HCA not present");
579 			mutex_exit(&ibdm.ibdm_hl_mutex);
580 			break;
581 		}
582 		hca_list->hl_nports_active--;
583 		port_sa_hdl = port->pa_sa_hdl;
584 		(void) ibdm_fini_port(port);
585 		port->pa_state = IBT_PORT_DOWN;
586 		cv_broadcast(&ibdm.ibdm_port_settle_cv);
587 		mutex_exit(&ibdm.ibdm_hl_mutex);
588 		ibdm_reset_all_dgids(port_sa_hdl);
589 		break;
590 
591 	case IBT_PORT_CHANGE_EVENT:
592 		IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_CHANGE");
593 		if (event->ev_port_flags & IBT_PORT_CHANGE_PKEY)
594 			ibdm_handle_port_change_event(event);
595 		break;
596 
597 	default:		/* Ignore all other events/errors */
598 		break;
599 	}
600 }
601 
602 static void
603 ibdm_handle_port_change_event(ibt_async_event_t *event)
604 {
605 	ibdm_port_attr_t	*port;
606 	ibdm_hca_list_t		*hca_list;
607 
608 	IBTF_DPRINTF_L2("ibdm", "\tibdm_handle_port_change_event:"
609 	    " HCA guid  %llx", event->ev_hca_guid);
610 	mutex_enter(&ibdm.ibdm_hl_mutex);
611 	port = ibdm_get_port_attr(event, &hca_list);
612 	if (port == NULL) {
613 		IBTF_DPRINTF_L2("ibdm", "\tevent_hdlr: HCA not present");
614 		mutex_exit(&ibdm.ibdm_hl_mutex);
615 		return;
616 	}
617 	ibdm_update_port_pkeys(port);
618 	cv_broadcast(&ibdm.ibdm_port_settle_cv);
619 	mutex_exit(&ibdm.ibdm_hl_mutex);
620 
621 	/* Inform IB nexus driver */
622 	mutex_enter(&ibdm.ibdm_ibnex_mutex);
623 	if (ibdm.ibdm_ibnex_callback != NULL) {
624 		(*ibdm.ibdm_ibnex_callback)((void *)
625 		    &event->ev_hca_guid, IBDM_EVENT_PORT_PKEY_CHANGE);
626 	}
627 	mutex_exit(&ibdm.ibdm_ibnex_mutex);
628 }
629 
630 /*
631  * ibdm_update_port_pkeys()
632  *	Update the pkey table
633  *	Update the port attributes
634  */
635 static void
636 ibdm_update_port_pkeys(ibdm_port_attr_t *port)
637 {
638 	uint_t				nports, size;
639 	uint_t				pkey_idx, opkey_idx;
640 	uint16_t			npkeys;
641 	ibt_hca_portinfo_t		*pinfop;
642 	ib_pkey_t			pkey;
643 	ibdm_pkey_tbl_t			*pkey_tbl;
644 	ibdm_port_attr_t		newport;
645 
646 	IBTF_DPRINTF_L4("ibdm", "\tupdate_port_pkeys:");
647 	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
648 
649 	/* Check whether the port is active */
650 	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
651 	    NULL) != IBT_SUCCESS)
652 		return;
653 
654 	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
655 	    &pinfop, &nports, &size) != IBT_SUCCESS) {
656 		/* This should not occur */
657 		port->pa_npkeys = 0;
658 		port->pa_pkey_tbl = NULL;
659 		return;
660 	}
661 
662 	npkeys = pinfop->p_pkey_tbl_sz;
663 	pkey_tbl = kmem_zalloc(npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
664 	newport.pa_pkey_tbl = pkey_tbl;
665 	newport.pa_ibmf_hdl = port->pa_ibmf_hdl;
666 
667 	for (pkey_idx = 0; pkey_idx < npkeys; pkey_idx++) {
668 		pkey = pkey_tbl[pkey_idx].pt_pkey =
669 		    pinfop->p_pkey_tbl[pkey_idx];
670 		/*
671 		 * Is this pkey present in the current table ?
672 		 */
673 		for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
674 			if (pkey == port->pa_pkey_tbl[opkey_idx].pt_pkey) {
675 				pkey_tbl[pkey_idx].pt_qp_hdl =
676 				    port->pa_pkey_tbl[opkey_idx].pt_qp_hdl;
677 				port->pa_pkey_tbl[opkey_idx].pt_qp_hdl = NULL;
678 				break;
679 			}
680 		}
681 
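		/*
		 * The pkey was not found in the old table; if it is a
		 * valid pkey, set up a new IBMF QP and receive callback
		 * for it.
		 */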
682 		if (opkey_idx == port->pa_npkeys) {
683 			pkey = pkey_tbl[pkey_idx].pt_pkey;
684 			if (IBDM_INVALID_PKEY(pkey)) {
685 				pkey_tbl[pkey_idx].pt_qp_hdl = NULL;
686 				continue;
687 			}
688 			ibdm_port_attr_ibmf_init(&newport, pkey, pkey_idx);
689 		}
690 	}
691 
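	/*
	 * Any QP handles still left in the old table belong to pkeys that
	 * are no longer present; tear down their IBMF state.
	 */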
692 	for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
693 		if (port->pa_pkey_tbl[opkey_idx].pt_qp_hdl != NULL) {
694 			if (ibdm_port_attr_ibmf_fini(port, opkey_idx) !=
695 			    IBDM_SUCCESS) {
696 				IBTF_DPRINTF_L2("ibdm", "\tupdate_port_pkeys: "
697 				    "ibdm_port_attr_ibmf_fini failed for "
698 				    "port pkey 0x%x",
699 				    port->pa_pkey_tbl[opkey_idx].pt_pkey);
700 			}
701 		}
702 	}
703 
704 	if (port->pa_pkey_tbl != NULL) {
705 		kmem_free(port->pa_pkey_tbl,
706 		    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
707 	}
708 
709 	port->pa_npkeys = npkeys;
710 	port->pa_pkey_tbl = pkey_tbl;
711 	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
712 	port->pa_state = pinfop->p_linkstate;
713 	ibt_free_portinfo(pinfop, size);
714 }
715 
716 /*
717  * ibdm_initialize_port()
718  *	Register with IBMF
719  *	Register with SA access
720  *	Register a receive callback routine with IBMF. IBMF invokes
721  *	this routine whenever a MAD arrives at this port.
722  *	Update the port attributes
723  */
724 static void
725 ibdm_initialize_port(ibdm_port_attr_t *port)
726 {
727 	int				ii;
728 	uint_t				nports, size;
729 	uint_t				pkey_idx;
730 	ib_pkey_t			pkey;
731 	ibt_hca_portinfo_t		*pinfop;
732 	ibmf_register_info_t		ibmf_reg;
733 	ibmf_saa_subnet_event_args_t	event_args;
734 
735 	IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
736 	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
737 
738 	/* Check whether the port is active */
739 	if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
740 	    NULL) != IBT_SUCCESS)
741 		return;
742 
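	/* Already initialized (SA session or pkey table exists); bail out */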
743 	if (port->pa_sa_hdl != NULL || port->pa_pkey_tbl != NULL)
744 		return;
745 
746 	if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
747 	    &pinfop, &nports, &size) != IBT_SUCCESS) {
748 		/* This should not occur */
749 		port->pa_npkeys		= 0;
750 		port->pa_pkey_tbl	= NULL;
751 		return;
752 	}
753 	port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
754 
755 	port->pa_state		= pinfop->p_linkstate;
756 	port->pa_npkeys		= pinfop->p_pkey_tbl_sz;
757 	port->pa_pkey_tbl	= (ibdm_pkey_tbl_t *)kmem_zalloc(
758 	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
759 
760 	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
761 		port->pa_pkey_tbl[pkey_idx].pt_pkey =
762 		    pinfop->p_pkey_tbl[pkey_idx];
763 
764 	ibt_free_portinfo(pinfop, size);
765 
766 	if (ibdm_enumerate_iocs) {
767 		event_args.is_event_callback = ibdm_saa_event_cb;
768 		event_args.is_event_callback_arg = port;
769 		if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
770 		    IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
771 			IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
772 			    "sa access registration failed");
773 			(void) ibdm_fini_port(port);
774 			return;
775 		}
776 
777 		ibmf_reg.ir_ci_guid		= port->pa_hca_guid;
778 		ibmf_reg.ir_port_num		= port->pa_port_num;
779 		ibmf_reg.ir_client_class	= DEV_MGT_MANAGER;
780 
781 		if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
782 		    &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
783 			IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
784 			    "IBMF registration failed");
785 			(void) ibdm_fini_port(port);
786 			return;
787 		}
788 
789 		if (ibmf_setup_async_cb(port->pa_ibmf_hdl,
790 		    IBMF_QP_HANDLE_DEFAULT,
791 		    ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
792 			IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
793 			    "IBMF setup recv cb failed");
794 			(void) ibdm_fini_port(port);
795 			return;
796 		}
797 	} else {
798 		port->pa_sa_hdl = NULL;
799 		port->pa_ibmf_hdl = NULL;
800 	}
801 
802 	for (ii = 0; ii < port->pa_npkeys; ii++) {
803 		pkey = port->pa_pkey_tbl[ii].pt_pkey;
804 		if (IBDM_INVALID_PKEY(pkey)) {
805 			port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
806 			continue;
807 		}
808 		ibdm_port_attr_ibmf_init(port, pkey, ii);
809 	}
810 }
811 
812 
813 /*
814  * ibdm_port_attr_ibmf_init:
815  *	With IBMF - Alloc QP Handle and Setup Async callback
816  */
817 static void
818 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
819 {
820 	int ret;
821 
822 	if (ibdm_enumerate_iocs == 0) {
823 		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
824 		return;
825 	}
826 
827 	if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
828 	    IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
829 	    IBMF_SUCCESS) {
830 		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
831 		    "IBMF failed to alloc qp %d", ret);
832 		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
833 		return;
834 	}
835 
836 	IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
837 	    port->pa_ibmf_hdl);
838 
839 	if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
840 	    port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
841 	    IBMF_SUCCESS) {
842 		IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
843 		    "IBMF setup recv cb failed %d", ret);
844 		(void) ibmf_free_qp(port->pa_ibmf_hdl,
845 		    &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
846 		port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
847 	}
848 }
849 
850 
851 /*
852  * ibdm_get_port_attr()
853  *	Get port attributes from HCA guid and port number
854  *	Return pointer to ibdm_port_attr_t on Success
855  *	and NULL on failure
856  */
857 static ibdm_port_attr_t *
858 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval)
859 {
860 	ibdm_hca_list_t		*hca_list;
861 	ibdm_port_attr_t	*port_attr;
862 	int			ii;
863 
864 	IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port);
865 	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
866 	hca_list = ibdm.ibdm_hca_list_head;
867 	while (hca_list) {
868 		if (hca_list->hl_hca_guid == event->ev_hca_guid) {
869 			for (ii = 0; ii < hca_list->hl_nports; ii++) {
870 				port_attr = &hca_list->hl_port_attr[ii];
871 				if (port_attr->pa_port_num == event->ev_port) {
872 					*retval = hca_list;
873 					return (port_attr);
874 				}
875 			}
876 		}
877 		hca_list = hca_list->hl_next;
878 	}
879 	return (NULL);
880 }
881 
882 
883 /*
884  * ibdm_update_port_attr()
885  *	Update the port attributes
886  */
887 static void
888 ibdm_update_port_attr(ibdm_port_attr_t *port)
889 {
890 	uint_t			nports, size;
891 	uint_t			pkey_idx;
892 	ibt_hca_portinfo_t	*portinfop;
893 
894 	IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin");
895 	if (ibt_query_hca_ports(port->pa_hca_hdl,
896 	    port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) {
897 		/* This should not occur */
898 		port->pa_npkeys		= 0;
899 		port->pa_pkey_tbl	= NULL;
900 		return;
901 	}
902 	port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix;
903 
904 	port->pa_state		= portinfop->p_linkstate;
905 
906 	/*
907 	 * PKey information in portinfo is valid only if the port is
908 	 * ACTIVE. Bail out if not.
909 	 */
910 	if (port->pa_state != IBT_PORT_ACTIVE) {
911 		port->pa_npkeys		= 0;
912 		port->pa_pkey_tbl	= NULL;
913 		ibt_free_portinfo(portinfop, size);
914 		return;
915 	}
916 
917 	port->pa_npkeys		= portinfop->p_pkey_tbl_sz;
918 	port->pa_pkey_tbl	= (ibdm_pkey_tbl_t *)kmem_zalloc(
919 	    port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
920 
921 	for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
922 		port->pa_pkey_tbl[pkey_idx].pt_pkey =
923 		    portinfop->p_pkey_tbl[pkey_idx];
924 	}
925 	ibt_free_portinfo(portinfop, size);
926 }
927 
928 
929 /*
930  * ibdm_handle_hca_attach()
931  */
932 static void
933 ibdm_handle_hca_attach(ib_guid_t hca_guid)
934 {
935 	uint_t			size;
936 	uint_t			ii, nports;
937 	ibt_status_t		status;
938 	ibt_hca_hdl_t		hca_hdl;
939 	ibt_hca_attr_t		*hca_attr;
940 	ibdm_hca_list_t		*hca_list, *temp;
941 	ibdm_port_attr_t	*port_attr;
942 	ibt_hca_portinfo_t	*portinfop;
943 
944 	IBTF_DPRINTF_L4("ibdm",
945 	    "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);
946 
947 	/* open the HCA first */
948 	if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
949 	    &hca_hdl)) != IBT_SUCCESS) {
950 		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
951 		    "open_hca failed, status 0x%x", status);
952 		return;
953 	}
954 
955 	hca_attr = (ibt_hca_attr_t *)
956 	    kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
957 	/* ibt_query_hca always returns IBT_SUCCESS */
958 	(void) ibt_query_hca(hca_hdl, hca_attr);
959 
960 	IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
961 	    " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
962 	    hca_attr->hca_version_id, hca_attr->hca_nports);
963 
964 	if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
965 	    &size)) != IBT_SUCCESS) {
966 		IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
967 		    "ibt_query_hca_ports failed, status 0x%x", status);
968 		kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
969 		(void) ibt_close_hca(hca_hdl);
970 		return;
971 	}
972 	hca_list = (ibdm_hca_list_t *)
973 	    kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
974 	hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
975 	    (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
976 	hca_list->hl_hca_guid = hca_attr->hca_node_guid;
977 	hca_list->hl_nports = hca_attr->hca_nports;
978 	hca_list->hl_attach_time = gethrtime();
979 	hca_list->hl_hca_hdl = hca_hdl;
980 
981 	/*
982 	 * Init a dummy port attribute for the HCA node
983 	 * This is for Per-HCA Node. Initialize port_attr :
984 	 * 	hca_guid & port_guid -> hca_guid
985 	 *	npkeys, pkey_tbl is NULL
986 	 *	port_num, sn_prefix is 0
987 	 *	vendorid, product_id, dev_version from HCA
988 	 *	pa_state is IBT_PORT_ACTIVE
989 	 */
990 	hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
991 	    sizeof (ibdm_port_attr_t), KM_SLEEP);
992 	port_attr = hca_list->hl_hca_port_attr;
993 	port_attr->pa_vendorid  = hca_attr->hca_vendor_id;
994 	port_attr->pa_productid	= hca_attr->hca_device_id;
995 	port_attr->pa_dev_version = hca_attr->hca_version_id;
996 	port_attr->pa_hca_guid	= hca_attr->hca_node_guid;
997 	port_attr->pa_hca_hdl	= hca_list->hl_hca_hdl;
998 	port_attr->pa_port_guid	= hca_attr->hca_node_guid;
999 	port_attr->pa_state	= IBT_PORT_ACTIVE;
1000 
1001 
1002 	for (ii = 0; ii < nports; ii++) {
1003 		port_attr		= &hca_list->hl_port_attr[ii];
1004 		port_attr->pa_vendorid	= hca_attr->hca_vendor_id;
1005 		port_attr->pa_productid	= hca_attr->hca_device_id;
1006 		port_attr->pa_dev_version = hca_attr->hca_version_id;
1007 		port_attr->pa_hca_guid	= hca_attr->hca_node_guid;
1008 		port_attr->pa_hca_hdl	= hca_list->hl_hca_hdl;
1009 		port_attr->pa_port_guid	= portinfop[ii].p_sgid_tbl->gid_guid;
1010 		port_attr->pa_sn_prefix	= portinfop[ii].p_sgid_tbl->gid_prefix;
1011 		port_attr->pa_port_num	= portinfop[ii].p_port_num;
1012 		port_attr->pa_state	= portinfop[ii].p_linkstate;
1013 
1014 		/*
1015 		 * Register with IBMF, SA access when the port is in
1016 		 * ACTIVE state. Also register a callback routine
1017 		 * with IBMF to receive incoming DM MADs.
1018 		 * The IBDM event handler takes care of registering
1019 		 * ports that are not active.
1020 		 */
1021 		IBTF_DPRINTF_L4("ibdm",
1022 		    "\thandle_hca_attach: port guid %llx Port state 0x%x",
1023 		    port_attr->pa_port_guid, portinfop[ii].p_linkstate);
1024 
1025 		if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
1026 			mutex_enter(&ibdm.ibdm_hl_mutex);
1027 			hca_list->hl_nports_active++;
1028 			ibdm_initialize_port(port_attr);
1029 			cv_broadcast(&ibdm.ibdm_port_settle_cv);
1030 			mutex_exit(&ibdm.ibdm_hl_mutex);
1031 		}
1032 	}
1033 	mutex_enter(&ibdm.ibdm_hl_mutex);
1034 	for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
1035 		if (temp->hl_hca_guid == hca_guid) {
1036 			IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
1037 			    "already seen by IBDM", hca_guid);
1038 			mutex_exit(&ibdm.ibdm_hl_mutex);
1039 			(void) ibdm_uninit_hca(hca_list);
1040 			return;
1041 		}
1042 	}
1043 	ibdm.ibdm_hca_count++;
1044 	if (ibdm.ibdm_hca_list_head == NULL) {
1045 		ibdm.ibdm_hca_list_head = hca_list;
1046 		ibdm.ibdm_hca_list_tail = hca_list;
1047 	} else {
1048 		ibdm.ibdm_hca_list_tail->hl_next = hca_list;
1049 		ibdm.ibdm_hca_list_tail = hca_list;
1050 	}
1051 	mutex_exit(&ibdm.ibdm_hl_mutex);
1052 	mutex_enter(&ibdm.ibdm_ibnex_mutex);
1053 	if (ibdm.ibdm_ibnex_callback != NULL) {
1054 		(*ibdm.ibdm_ibnex_callback)((void *)
1055 		    &hca_guid, IBDM_EVENT_HCA_ADDED);
1056 	}
1057 	mutex_exit(&ibdm.ibdm_ibnex_mutex);
1058 
1059 	kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
1060 	ibt_free_portinfo(portinfop, size);
1061 }
1062 
1063 
1064 /*
1065  * ibdm_handle_hca_detach()
1066  */
1067 static void
1068 ibdm_handle_hca_detach(ib_guid_t hca_guid)
1069 {
1070 	ibdm_hca_list_t		*head, *prev = NULL;
1071 	size_t			len;
1072 	ibdm_dp_gidinfo_t	*gidinfo;
1073 	ibdm_port_attr_t	*port_attr;
1074 	int			i;
1075 
1076 	IBTF_DPRINTF_L4("ibdm",
1077 	    "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);
1078 
1079 	/* Make sure no probes are running */
1080 	mutex_enter(&ibdm.ibdm_mutex);
1081 	while (ibdm.ibdm_busy & IBDM_BUSY)
1082 		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
1083 	ibdm.ibdm_busy |= IBDM_BUSY;
1084 	mutex_exit(&ibdm.ibdm_mutex);
1085 
1086 	mutex_enter(&ibdm.ibdm_hl_mutex);
1087 	head = ibdm.ibdm_hca_list_head;
1088 	while (head) {
1089 		if (head->hl_hca_guid == hca_guid) {
1090 			if (prev == NULL)
1091 				ibdm.ibdm_hca_list_head = head->hl_next;
1092 			else
1093 				prev->hl_next = head->hl_next;
1094 			if (ibdm.ibdm_hca_list_tail == head)
1095 				ibdm.ibdm_hca_list_tail = prev;
1096 			ibdm.ibdm_hca_count--;
1097 			break;
1098 		}
1099 		prev = head;
1100 		head = head->hl_next;
1101 	}
1102 	mutex_exit(&ibdm.ibdm_hl_mutex);
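	/*
	 * If the HCA could not be torn down, restore it by re-running
	 * the attach path.
	 */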
1103 	if (ibdm_uninit_hca(head) != IBDM_SUCCESS)
1104 		(void) ibdm_handle_hca_attach(hca_guid);
1105 
1106 #ifdef DEBUG
1107 	if (ibdm_enumerate_iocs == 0) {
1108 		ASSERT(ibdm.ibdm_dp_gidlist_head == NULL);
1109 	}
1110 #endif
1111 
1112 	/*
1113 	 * Now clean up the HCA lists in the gidlist.
1114 	 */
1115 	for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo =
1116 	    gidinfo->gl_next) {
1117 		prev = NULL;
1118 		head = gidinfo->gl_hca_list;
1119 		while (head) {
1120 			if (head->hl_hca_guid == hca_guid) {
1121 				if (prev == NULL)
1122 					gidinfo->gl_hca_list =
1123 					    head->hl_next;
1124 				else
1125 					prev->hl_next = head->hl_next;
1126 				for (i = 0; i < head->hl_nports; i++) {
1127 					port_attr = &head->hl_port_attr[i];
1128 					if (port_attr->pa_pkey_tbl != NULL)
1129 						kmem_free(
1130 						    port_attr->pa_pkey_tbl,
1131 						    port_attr->pa_npkeys *
1132 						    sizeof (ibdm_pkey_tbl_t));
1133 				}
1134 				len = sizeof (ibdm_hca_list_t) +
1135 				    (head->hl_nports *
1136 				    sizeof (ibdm_port_attr_t));
1137 				kmem_free(head, len);
1138 
1139 				break;
1140 			}
1141 			prev = head;
1142 			head = head->hl_next;
1143 		}
1144 	}
1145 
1146 	mutex_enter(&ibdm.ibdm_mutex);
1147 	ibdm.ibdm_busy &= ~IBDM_BUSY;
1148 	cv_broadcast(&ibdm.ibdm_busy_cv);
1149 	mutex_exit(&ibdm.ibdm_mutex);
1150 }
1151 
1152 
1153 static int
1154 ibdm_uninit_hca(ibdm_hca_list_t *head)
1155 {
1156 	int			ii;
1157 	ibdm_port_attr_t	*port_attr;
1158 
1159 	for (ii = 0; ii < head->hl_nports; ii++) {
1160 		port_attr = &head->hl_port_attr[ii];
1161 		if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) {
1162 			IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x "
1163 			    "ibdm_fini_port() failed", head, ii);
1164 			return (IBDM_FAILURE);
1165 		}
1166 	}
1167 	if (head->hl_hca_hdl)
1168 		if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) {
1169 			IBTF_DPRINTF_L2("ibdm", "uninit_hca: "
1170 			    "ibt_close_hca() failed");
1171 			return (IBDM_FAILURE);
1172 		}
1173 	kmem_free(head->hl_port_attr,
1174 	    head->hl_nports * sizeof (ibdm_port_attr_t));
1175 	kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t));
1176 	kmem_free(head, sizeof (ibdm_hca_list_t));
1177 	return (IBDM_SUCCESS);
1178 }
1179 
1180 
1181 /*
1182  * For each port on the HCA,
1183  *	1) Teardown IBMF receive callback function
1184  *	2) Unregister with IBMF
1185  *	3) Unregister with SA access
1186  */
1187 static int
1188 ibdm_fini_port(ibdm_port_attr_t *port_attr)
1189 {
1190 	int	ii, ibmf_status;
1191 
1192 	for (ii = 0; ii < port_attr->pa_npkeys; ii++) {
1193 		if (port_attr->pa_pkey_tbl == NULL)
1194 			break;
1195 		if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl)
1196 			continue;
1197 		if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) {
1198 			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
1199 			    "ibdm_port_attr_ibmf_fini failed for "
1200 			    "port pkey 0x%x", ii);
1201 			return (IBDM_FAILURE);
1202 		}
1203 	}
1204 
1205 	if (port_attr->pa_ibmf_hdl) {
1206 		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
1207 		    IBMF_QP_HANDLE_DEFAULT, 0);
1208 		if (ibmf_status != IBMF_SUCCESS) {
1209 			IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
1210 			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
1211 			return (IBDM_FAILURE);
1212 		}
1213 
1214 		ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0);
1215 		if (ibmf_status != IBMF_SUCCESS) {
1216 			IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
1217 			    "ibmf_unregister failed %d", ibmf_status);
1218 			return (IBDM_FAILURE);
1219 		}
1220 
1221 		port_attr->pa_ibmf_hdl = NULL;
1222 	}
1223 
1224 	if (port_attr->pa_sa_hdl) {
1225 		ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0);
1226 		if (ibmf_status != IBMF_SUCCESS) {
1227 			IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
1228 			    "ibmf_sa_session_close failed %d", ibmf_status);
1229 			return (IBDM_FAILURE);
1230 		}
1231 		port_attr->pa_sa_hdl = NULL;
1232 	}
1233 
1234 	if (port_attr->pa_pkey_tbl != NULL) {
1235 		kmem_free(port_attr->pa_pkey_tbl,
1236 		    port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
1237 		port_attr->pa_pkey_tbl = NULL;
1238 		port_attr->pa_npkeys = 0;
1239 	}
1240 
1241 	return (IBDM_SUCCESS);
1242 }
1243 
1244 
1245 /*
1246  * ibdm_port_attr_ibmf_fini:
1247  *	With IBMF - Tear down Async callback and free QP Handle
1248  */
1249 static int
1250 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii)
1251 {
1252 	int ibmf_status;
1253 
1254 	IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:");
1255 
1256 	if (ibdm_enumerate_iocs == 0) {
1257 		ASSERT(port_attr->pa_pkey_tbl[ii].pt_qp_hdl == NULL);
1258 		return (IBDM_SUCCESS);
1259 	}
1260 
1261 	if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) {
1262 		ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
1263 		    port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
1264 		if (ibmf_status != IBMF_SUCCESS) {
1265 			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
1266 			    "ibmf_tear_down_async_cb failed %d", ibmf_status);
1267 			return (IBDM_FAILURE);
1268 		}
1269 		ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl,
1270 		    &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
1271 		if (ibmf_status != IBMF_SUCCESS) {
1272 			IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
1273 			    "ibmf_free_qp failed %d", ibmf_status);
1274 			return (IBDM_FAILURE);
1275 		}
1276 		port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
1277 	}
1278 	return (IBDM_SUCCESS);
1279 }
1280 
1281 
1282 /*
1283  * ibdm_gid_decr_pending:
1284  *	decrement gl_pending_cmds; if it reaches zero, wake up sleeping threads
1285  */
1286 static void
1287 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo)
1288 {
1289 	mutex_enter(&ibdm.ibdm_mutex);
1290 	mutex_enter(&gidinfo->gl_mutex);
1291 	if (--gidinfo->gl_pending_cmds == 0) {
1292 		/*
1293 		 * Handle DGID getting removed.
1294 		 */
1295 		if (gidinfo->gl_disconnected) {
1296 			mutex_exit(&gidinfo->gl_mutex);
1297 			mutex_exit(&ibdm.ibdm_mutex);
1298 
1299 			IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
1300 			    "gidinfo %p hot removal", gidinfo);
1301 			ibdm_delete_gidinfo(gidinfo);
1302 
1303 			mutex_enter(&ibdm.ibdm_mutex);
1304 			ibdm.ibdm_ngid_probes_in_progress--;
1305 			ibdm_wait_probe_completion();
1306 			mutex_exit(&ibdm.ibdm_mutex);
1307 			return;
1308 		}
1309 		mutex_exit(&gidinfo->gl_mutex);
1310 		mutex_exit(&ibdm.ibdm_mutex);
1311 		ibdm_notify_newgid_iocs(gidinfo);
1312 		mutex_enter(&ibdm.ibdm_mutex);
1313 		mutex_enter(&gidinfo->gl_mutex);
1314 
1315 		ibdm.ibdm_ngid_probes_in_progress--;
1316 		ibdm_wait_probe_completion();
1317 	}
1318 	mutex_exit(&gidinfo->gl_mutex);
1319 	mutex_exit(&ibdm.ibdm_mutex);
1320 }
1321 
1322 
1323 /*
1324  * ibdm_wait_probe_completion:
1325  *	wait for probing to complete
1326  */
1327 static void
1328 ibdm_wait_probe_completion(void)
1329 {
1330 	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1331 	if (ibdm.ibdm_ngid_probes_in_progress) {
1332 		IBTF_DPRINTF_L4("ibdm",	"\twait for probe complete");
1333 		ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
1334 		while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
1335 			cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
1336 	}
1337 }
1338 
1339 
1340 /*
1341  * ibdm_wait_cisco_probe_completion:
1342  *	wait for the reply from the Cisco FC GW switch after a setclassportinfo
1343  *	request is sent. The wait is performed per GID.
1344  */
1345 static void
1346 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo)
1347 {
1348 	ASSERT(MUTEX_HELD(&gidinfo->gl_mutex));
1349 	IBTF_DPRINTF_L4("ibdm",	"\twait for cisco probe complete");
1350 	gidinfo->gl_flag |= IBDM_CISCO_PROBE;
1351 	while (gidinfo->gl_flag & IBDM_CISCO_PROBE)
1352 		cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex);
1353 }
1354 
1355 
1356 /*
1357  * ibdm_wakeup_probe_gid_cv:
1358  *	wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
1359  */
1360 static void
1361 ibdm_wakeup_probe_gid_cv(void)
1362 {
1363 	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1364 	if (!ibdm.ibdm_ngid_probes_in_progress) {
1365 		IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
1366 		ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
1367 		cv_broadcast(&ibdm.ibdm_probe_cv);
1368 	}
1369 
1370 }
1371 
1372 
1373 /*
1374  * ibdm_sweep_fabric(reprobe_flag)
1375  *	Find all possible managed IOUs and their IOCs that are visible
1376  *	to the host. The algorithm used is as follows:
1377  *
1378  *	Send a "bus walk" request for each port on the host HCA to SA access
1379  *	SA returns the complete set of GIDs that are reachable from the
1380  *	source port. This is done in parallel.
1381  *
1382  *	Initialize GID state to IBDM_GID_PROBE_NOT_DONE
1383  *
1384  *	Sort the GID list and eliminate duplicate GIDs
1385  *		1) Use DGID for sorting
1386  *		2) use PortGuid for sorting
1387  *			Send SA query to retrieve NodeRecord and
1388  *			extract PortGuid from that.
1389  *
1390  *	Set GID state to IBDM_GID_PROBE_FAILED for all the ports that don't
1391  *	support DM MADs
1392  *		Send a "Portinfo" query to get the port capabilities and
1393  *		then check for DM MAD's support
1394  *
1395  *	Send "ClassPortInfo" request for all the GIDs in parallel,
1396  *	set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
1397  *	cv_signal to complete.
1398  *
1399  *	When DM agent on the remote GID sends back the response, IBMF
1400  *	invokes DM callback routine.
1401  *
1402  *	If the response is proper, send "IOUnitInfo" request and set
1403  *	GID state to IBDM_GET_IOUNITINFO.
1404  *
1405  *	If the response is proper, send "IocProfileInfo" request to
1406  *	all the IOCs simultaneously and set GID state to IBDM_GET_IOC_DETAILS.
1407  *
1408  *	Send request to get Service entries simultaneously
1409  *
1410  *	Signal the waiting thread when received response for all the commands.
1411  *
1412  *	Set the GID state to IBDM_GID_PROBE_FAILED when an error response
1413  *	is received during the probing period.
1414  *
1415  *	Note:
1416  *	ibdm.ibdm_ngid_probes_in_progress and ibdm_gid_list_t:gl_pending_cmds
1417  *	keep track of the number of commands in progress at any point in time.
1418  *	MAD transaction ID is used to identify a particular GID
1419  *	TBD: Consider registering the IBMF receive callback on demand
1420  *
1421  *	Note: This routine must be called with ibdm.ibdm_mutex held
1422  *	TBD: Re-probe the failed GID (for certain failures) when a fabric
1423  *	     sweep is next requested
1424  *
1425  *	Parameters: If reprobe_flag is set, all IOCs will be reprobed.
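 *
 *	A minimal illustrative calling sketch (the surrounding locking is the
 *	caller's responsibility, per the note above; 0 means no reprobe):
 *		mutex_enter(&ibdm.ibdm_mutex);
 *		ibdm_sweep_fabric(0);
 *		mutex_exit(&ibdm.ibdm_mutex);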
1426  */
1427 static void
1428 ibdm_sweep_fabric(int reprobe_flag)
1429 {
1430 	int			ii;
1431 	int			new_paths = 0;
1432 	uint8_t			niocs;
1433 	taskqid_t		tid;
1434 	ibdm_ioc_info_t		*ioc;
1435 	ibdm_hca_list_t		*hca_list = NULL;
1436 	ibdm_port_attr_t	*port = NULL;
1437 	ibdm_dp_gidinfo_t 	*gid_info;
1438 
1439 	IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
1440 	ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1441 
1442 	/*
1443 	 * Check whether a sweep is already in progress. If so, just
1444 	 * wait for the fabric sweep to complete
1445 	 */
1446 	while (ibdm.ibdm_busy & IBDM_BUSY)
1447 		cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
1448 	ibdm.ibdm_busy |= IBDM_BUSY;
1449 	mutex_exit(&ibdm.ibdm_mutex);
1450 
1451 	ibdm_dump_sweep_fabric_timestamp(0);
1452 
1453 	/* Rescan the GID list for any removed GIDs for reprobe */
1454 	if (reprobe_flag)
1455 		ibdm_rescan_gidlist(NULL);
1456 
1457 	/*
1458 	 * Get list of all the ports reachable from the local known HCA
1459 	 * ports which are active
1460 	 */
1461 	mutex_enter(&ibdm.ibdm_hl_mutex);
1462 	for (ibdm_get_next_port(&hca_list, &port, 1); port;
1463 	    ibdm_get_next_port(&hca_list, &port, 1)) {
1464 		/*
1465 		 * Get PATHS to all the reachable ports from
1466 		 * SGID and update the global ibdm structure.
1467 		 */
1468 		new_paths = ibdm_get_reachable_ports(port, hca_list);
1469 		ibdm.ibdm_ngids += new_paths;
1470 	}
1471 	mutex_exit(&ibdm.ibdm_hl_mutex);
1472 
1473 	mutex_enter(&ibdm.ibdm_mutex);
1474 	ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
1475 	mutex_exit(&ibdm.ibdm_mutex);
1476 
1477 	/* Send a request to probe GIDs asynchronously. */
1478 	for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
1479 	    gid_info = gid_info->gl_next) {
1480 		mutex_enter(&gid_info->gl_mutex);
1481 		gid_info->gl_reprobe_flag = reprobe_flag;
1482 		mutex_exit(&gid_info->gl_mutex);
1483 
1484 		/* process newly encountered GIDs */
1485 		tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
1486 		    (void *)gid_info, TQ_NOSLEEP);
1487 		IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p"
1488 		    " taskq_id = %x", gid_info, tid);
1489 		/* if the taskq failed to dispatch, call it directly */
1490 		if (tid == TASKQID_INVALID)
1491 			ibdm_probe_gid_thread((void *)gid_info);
1492 	}
1493 
1494 	mutex_enter(&ibdm.ibdm_mutex);
1495 	ibdm_wait_probe_completion();
1496 
1497 	/*
1498 	 * Update the properties, if reprobe_flag is set
1499 	 * Skip if gl_reprobe_flag is set; this will be
1500 	 * a re-inserted / new GID, for which notifications
1501 	 * have already been sent.
1502 	 */
1503 	if (reprobe_flag) {
1504 		for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
1505 		    gid_info = gid_info->gl_next) {
1506 			if (gid_info->gl_iou == NULL)
1507 				continue;
1508 			if (gid_info->gl_reprobe_flag) {
1509 				gid_info->gl_reprobe_flag = 0;
1510 				continue;
1511 			}
1512 
1513 			niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
1514 			for (ii = 0; ii < niocs; ii++) {
1515 				ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
1516 				if (ioc)
1517 					ibdm_reprobe_update_port_srv(ioc,
1518 					    gid_info);
1519 			}
1520 		}
1521 	} else if (ibdm.ibdm_prev_iou) {
1522 		ibdm_ioc_info_t	*ioc_list;
1523 
1524 		/*
1525 		 * Get the list of IOCs which have changed.
1526 		 * If any IOCs have changed, Notify IBNexus
1527 		 */
1528 		ibdm.ibdm_prev_iou = 0;
1529 		ioc_list = ibdm_handle_prev_iou();
1530 		if (ioc_list) {
1531 			if (ibdm.ibdm_ibnex_callback != NULL) {
1532 				(*ibdm.ibdm_ibnex_callback)(
1533 				    (void *)ioc_list,
1534 				    IBDM_EVENT_IOC_PROP_UPDATE);
1535 			}
1536 		}
1537 	}
1538 
1539 	ibdm_dump_sweep_fabric_timestamp(1);
1540 
1541 	ibdm.ibdm_busy &= ~IBDM_BUSY;
1542 	cv_broadcast(&ibdm.ibdm_busy_cv);
1543 	IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT");
1544 }
1545 
1546 
1547 /*
1548  * ibdm_is_cisco:
1549  * 	Check if this is a Cisco device or not.
1550  */
1551 static boolean_t
1552 ibdm_is_cisco(ib_guid_t guid)
1553 {
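	/*
	 * The company ID is the IEEE OUI carried in the most significant
	 * bits of the EUI-64 formatted node GUID.
	 */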
1554 	if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID)
1555 		return (B_TRUE);
1556 	return (B_FALSE);
1557 }
1558 
1559 
1560 /*
1561  * ibdm_is_cisco_switch:
1562  * 	Check if this switch is a CISCO switch or not.
1563  * 	Note that if this switch is already activated, ibdm_is_cisco_switch()
1564  * 	returns B_FALSE not to re-activate it again.
1565  */
1566 static boolean_t
1567 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info)
1568 {
1569 	int company_id, device_id;
1570 	ASSERT(gid_info != 0);
1571 	ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
1572 
1573 	/*
1574 	 * If this switch is already activated, don't re-activate it.
1575 	 */
1576 	if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE)
1577 		return (B_FALSE);
1578 
1579 	/*
1580 	 * Check if this switch is a Cisco FC GW or not.
1581 	 * Use the node guid (the OUI part) instead of the vendor id
1582 	 * since the vendor id is zero in practice.
1583 	 */
1584 	company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT;
1585 	device_id = gid_info->gl_devid;
1586 
1587 	if (company_id == IBDM_CISCO_COMPANY_ID &&
1588 	    device_id == IBDM_CISCO_DEVICE_ID)
1589 		return (B_TRUE);
1590 	return (B_FALSE);
1591 }
1592 
1593 
1594 /*
1595  * ibdm_probe_gid_thread:
1596  *	thread that does the actual work for sweeping the fabric
1597  *	for a given GID
1598  */
1599 static void
1600 ibdm_probe_gid_thread(void *args)
1601 {
1602 	int			reprobe_flag;
1603 	ib_guid_t		node_guid;
1604 	ib_guid_t		port_guid;
1605 	ibdm_dp_gidinfo_t	*gid_info;
1606 
1607 	gid_info = (ibdm_dp_gidinfo_t *)args;
1608 	reprobe_flag = gid_info->gl_reprobe_flag;
1609 	IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d",
1610 	    gid_info, reprobe_flag);
1611 	ASSERT(gid_info != NULL);
1612 	ASSERT(gid_info->gl_pending_cmds == 0);
1613 
1614 	if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE &&
1615 	    reprobe_flag == 0) {
1616 		/*
1617 		 * This GID may already have been probed. Send
1618 		 * in a CLP to check whether the IOUnitInfo changed.
1619 		 * Explicitly set gl_reprobe_flag to 0 so that
1620 		 * IBnex is not notified on completion
1621 		 */
1622 		if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) {
1623 			IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: "
1624 			    "get new IOCs information");
1625 			mutex_enter(&gid_info->gl_mutex);
1626 			gid_info->gl_pending_cmds++;
1627 			gid_info->gl_state = IBDM_GET_IOUNITINFO;
1628 			gid_info->gl_reprobe_flag = 0;
1629 			mutex_exit(&gid_info->gl_mutex);
1630 			if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) {
1631 				mutex_enter(&gid_info->gl_mutex);
1632 				--gid_info->gl_pending_cmds;
1633 				mutex_exit(&gid_info->gl_mutex);
1634 				mutex_enter(&ibdm.ibdm_mutex);
1635 				--ibdm.ibdm_ngid_probes_in_progress;
1636 				ibdm_wakeup_probe_gid_cv();
1637 				mutex_exit(&ibdm.ibdm_mutex);
1638 			}
1639 		} else {
1640 			mutex_enter(&ibdm.ibdm_mutex);
1641 			--ibdm.ibdm_ngid_probes_in_progress;
1642 			ibdm_wakeup_probe_gid_cv();
1643 			mutex_exit(&ibdm.ibdm_mutex);
1644 		}
1645 		return;
1646 	} else if (reprobe_flag && gid_info->gl_state ==
1647 	    IBDM_GID_PROBING_COMPLETE) {
1648 		/*
1649 		 * Reprobe all IOCs for the GID which has completed
1650 		 * probe. Skip other port GIDs to same IOU.
1651 		 * Explicitly set gl_reprobe_flag to 0 so that
1652 		 * IBnex is not notified on completion
1653 		 */
1654 		ibdm_ioc_info_t *ioc_info;
1655 		uint8_t		niocs, ii;
1656 
1657 		ASSERT(gid_info->gl_iou);
1658 		mutex_enter(&gid_info->gl_mutex);
1659 		niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
1660 		gid_info->gl_state = IBDM_GET_IOC_DETAILS;
1661 		gid_info->gl_pending_cmds += niocs;
1662 		gid_info->gl_reprobe_flag = 0;
1663 		mutex_exit(&gid_info->gl_mutex);
1664 		for (ii = 0; ii < niocs; ii++) {
1665 			uchar_t			slot_info;
1666 			ib_dm_io_unitinfo_t	*giou_info;
1667 
1668 			/*
1669 			 * Check whether IOC is present in the slot
1670 			 * Series of nibbles (in the field
1671 			 * iou_ctrl_list) represents a slot in the
1672 			 * IOU.
1673 			 * Byte format: 76543210
1674 			 * Bits 0-3 of first byte represent Slot 2
1675 			 * bits 4-7 of first byte represent slot 1,
1676 			 * bits 0-3 of second byte represent slot 4
1677 			 * and so on
1678 			 * Each 4-bit nibble has the following meaning
1679 			 * 0x0 : IOC not installed
1680 			 * 0x1 : IOC is present
1681 			 * 0xf : Slot does not exist
1682 			 * and all other values are reserved.
1683 			 */
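			/*
			 * Worked example (illustrative): for slot 3
			 * (ii == 2), the nibble lives in iou_ctrl_list[1];
			 * since ii is even, the upper nibble is shifted
			 * down before its low four bits are tested against
			 * 0x1 below.
			 */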
1684 			ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
1685 			giou_info = &gid_info->gl_iou->iou_info;
1686 			slot_info = giou_info->iou_ctrl_list[(ii/2)];
1687 			if ((ii % 2) == 0)
1688 				slot_info = (slot_info >> 4);
1689 
1690 			if ((slot_info & 0xf) != 1) {
1691 				ioc_info->ioc_state =
1692 				    IBDM_IOC_STATE_PROBE_FAILED;
1693 				ibdm_gid_decr_pending(gid_info);
1694 				continue;
1695 			}
1696 
1697 			if (ibdm_send_ioc_profile(gid_info, ii) !=
1698 			    IBDM_SUCCESS) {
1699 				ibdm_gid_decr_pending(gid_info);
1700 			}
1701 		}
1702 
1703 		return;
1704 	} else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
1705 		mutex_enter(&ibdm.ibdm_mutex);
1706 		--ibdm.ibdm_ngid_probes_in_progress;
1707 		ibdm_wakeup_probe_gid_cv();
1708 		mutex_exit(&ibdm.ibdm_mutex);
1709 		return;
1710 	}
1711 
1712 	/*
1713 	 * Check whether the destination GID supports DM agents. If
1714 	 * not, stop probing the GID and continue with the next GID
1715 	 * in the list.
1716 	 */
1717 	if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
1718 		mutex_enter(&gid_info->gl_mutex);
1719 		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1720 		gid_info->gl_is_dm_capable = B_FALSE;
1721 		mutex_exit(&gid_info->gl_mutex);
1722 		ibdm_delete_glhca_list(gid_info);
1723 		mutex_enter(&ibdm.ibdm_mutex);
1724 		--ibdm.ibdm_ngid_probes_in_progress;
1725 		ibdm_wakeup_probe_gid_cv();
1726 		mutex_exit(&ibdm.ibdm_mutex);
1727 		return;
1728 	}
1729 
1730 	/*
1731 	 * This GID is Device management capable
1732 	 */
1733 	mutex_enter(&gid_info->gl_mutex);
1734 	gid_info->gl_is_dm_capable = B_TRUE;
1735 	mutex_exit(&gid_info->gl_mutex);
1736 
1737 	/* Get the nodeguid and portguid of the port */
1738 	if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
1739 	    &node_guid, &port_guid) != IBDM_SUCCESS) {
1740 		mutex_enter(&gid_info->gl_mutex);
1741 		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1742 		mutex_exit(&gid_info->gl_mutex);
1743 		ibdm_delete_glhca_list(gid_info);
1744 		mutex_enter(&ibdm.ibdm_mutex);
1745 		--ibdm.ibdm_ngid_probes_in_progress;
1746 		ibdm_wakeup_probe_gid_cv();
1747 		mutex_exit(&ibdm.ibdm_mutex);
1748 		return;
1749 	}
1750 
1751 	/*
1752 	 * Check whether we already knew about this NodeGuid
1753 	 * If so, do not probe the GID and continue with the
1754 	 * next  GID  in the gid  list. Set the GID state to
1755 	 * probing done.
1756 	 */
1757 	mutex_enter(&ibdm.ibdm_mutex);
1758 	gid_info->gl_nodeguid = node_guid;
1759 	gid_info->gl_portguid = port_guid;
1760 	if (ibdm_check_dest_nodeguid(gid_info) != NULL) {
1761 		mutex_exit(&ibdm.ibdm_mutex);
1762 		mutex_enter(&gid_info->gl_mutex);
1763 		gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
1764 		mutex_exit(&gid_info->gl_mutex);
1765 		ibdm_delete_glhca_list(gid_info);
1766 		mutex_enter(&ibdm.ibdm_mutex);
1767 		--ibdm.ibdm_ngid_probes_in_progress;
1768 		ibdm_wakeup_probe_gid_cv();
1769 		mutex_exit(&ibdm.ibdm_mutex);
1770 		return;
1771 	}
1772 	ibdm_add_to_gl_gid(gid_info, gid_info);
1773 	mutex_exit(&ibdm.ibdm_mutex);
1774 
1775 	/*
1776 	 * New or reinserted GID : Enable notification to IBnex
1777 	 */
1778 	mutex_enter(&gid_info->gl_mutex);
1779 	gid_info->gl_reprobe_flag = 1;
1780 
1781 	/*
1782 	 * A Cisco FC GW needs the special handling to get IOUnitInfo.
1783 	 */
1784 	if (ibdm_is_cisco_switch(gid_info)) {
1785 		gid_info->gl_pending_cmds++;
1786 		gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
1787 		mutex_exit(&gid_info->gl_mutex);
1788 
1789 		if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
1790 			mutex_enter(&gid_info->gl_mutex);
1791 			gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1792 			--gid_info->gl_pending_cmds;
1793 			mutex_exit(&gid_info->gl_mutex);
1794 
1795 			/* free the hca_list on this gid_info */
1796 			ibdm_delete_glhca_list(gid_info);
1797 
1798 			mutex_enter(&ibdm.ibdm_mutex);
1799 			--ibdm.ibdm_ngid_probes_in_progress;
1800 			ibdm_wakeup_probe_gid_cv();
1801 			mutex_exit(&ibdm.ibdm_mutex);
1802 
1803 			return;
1804 		}
1805 
1806 		mutex_enter(&gid_info->gl_mutex);
1807 		ibdm_wait_cisco_probe_completion(gid_info);
1808 
1809 		IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: "
1810 		    "CISCO Wakeup signal received");
1811 	}
1812 
1813 	/* move on to the 'GET_CLASSPORTINFO' stage */
1814 	gid_info->gl_pending_cmds++;
1815 	gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
1816 	mutex_exit(&gid_info->gl_mutex);
1817 
1818 	IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: "
1819 	    "%d: gid_info %p gl_state %d pending_cmds %d",
1820 	    __LINE__, gid_info, gid_info->gl_state,
1821 	    gid_info->gl_pending_cmds);
1822 
1823 	/*
1824 	 * Send ClassPortInfo request to the GID asynchronously.
1825 	 */
1826 	if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {
1827 
1828 		mutex_enter(&gid_info->gl_mutex);
1829 		gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1830 		--gid_info->gl_pending_cmds;
1831 		mutex_exit(&gid_info->gl_mutex);
1832 
1833 		/* free the hca_list on this gid_info */
1834 		ibdm_delete_glhca_list(gid_info);
1835 
1836 		mutex_enter(&ibdm.ibdm_mutex);
1837 		--ibdm.ibdm_ngid_probes_in_progress;
1838 		ibdm_wakeup_probe_gid_cv();
1839 		mutex_exit(&ibdm.ibdm_mutex);
1840 
1841 		return;
1842 	}
1843 }
1844 
1845 
1846 /*
1847  * ibdm_check_dest_nodeguid
1848  *	Searches for the NodeGuid in the GID list
1849  *	Returns the matching gid_info if found, otherwise NULL
1850  *
1851  *	This function is called to handle new GIDs discovered
1852  *	during device sweep / probe or for GID_AVAILABLE event.
1853  *
1854  *	Parameter :
1855  *		gid_info	GID to check
1856  */
1857 static ibdm_dp_gidinfo_t *
1858 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
1859 {
1860 	ibdm_dp_gidinfo_t	*gid_list;
1861 	ibdm_gid_t		*tmp;
1862 
1863 	IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");
1864 
1865 	gid_list = ibdm.ibdm_dp_gidlist_head;
1866 	while (gid_list) {
1867 		if ((gid_list != gid_info) &&
1868 		    (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
1869 			IBTF_DPRINTF_L4("ibdm",
1870 			    "\tcheck_dest_nodeguid: NodeGuid is present");
1871 
1872 			/* Add to gid_list */
1873 			tmp = kmem_zalloc(sizeof (ibdm_gid_t),
1874 			    KM_SLEEP);
1875 			tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
1876 			tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
1877 			tmp->gid_next = gid_list->gl_gid;
1878 			gid_list->gl_gid = tmp;
1879 			gid_list->gl_ngids++;
1880 			return (gid_list);
1881 		}
1882 
1883 		gid_list = gid_list->gl_next;
1884 	}
1885 
1886 	return (NULL);
1887 }
1888 
1889 
1890 /*
1891  * ibdm_is_dev_mgt_supported
1892  *	Get the PortInfo attribute (SA Query)
1893  *	Check the "CapabilityMask" field in the PortInfo.
1894  *	Return IBDM_SUCCESS if DM MADs are supported (bit 19 set)
1895  *	by the port, otherwise IBDM_FAILURE
1896  */
1897 static int
1898 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
1899 {
1900 	int			ret;
1901 	size_t			length = 0;
1902 	sa_portinfo_record_t	req, *resp = NULL;
1903 	ibmf_saa_access_args_t	qargs;
1904 
1905 	bzero(&req, sizeof (sa_portinfo_record_t));
1906 	req.EndportLID	= gid_info->gl_dlid;
1907 
1908 	qargs.sq_attr_id	= SA_PORTINFORECORD_ATTRID;
1909 	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
1910 	qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
1911 	qargs.sq_template	= &req;
1912 	qargs.sq_callback	= NULL;
1913 	qargs.sq_callback_arg	= NULL;
1914 
1915 	ret = ibmf_sa_access(gid_info->gl_sa_hdl,
1916 	    &qargs, 0, &length, (void **)&resp);
1917 
1918 	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
1919 		IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
1920 		    "failed to get PORTINFO attribute %d", ret);
1921 		return (IBDM_FAILURE);
1922 	}
1923 
1924 	if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
1925 		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
1926 		ret = IBDM_SUCCESS;
1927 	} else {
1928 		IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
1929 		    "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
1930 		ret = IBDM_FAILURE;
1931 	}
1932 	kmem_free(resp, length);
1933 	return (ret);
1934 }
1935 
1936 
1937 /*
1938  * ibdm_get_node_port_guids()
1939  *	Get the NodeInfoRecord of the port
1940  *	Save NodeGuid and PortGUID values in the GID list structure.
1941  *	Return IBDM_SUCCESS/IBDM_FAILURE
1942  */
1943 static int
1944 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
1945     ib_guid_t *node_guid, ib_guid_t *port_guid)
1946 {
1947 	int			ret;
1948 	size_t			length = 0;
1949 	sa_node_record_t	req, *resp = NULL;
1950 	ibmf_saa_access_args_t	qargs;
1951 
1952 	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");
1953 
1954 	bzero(&req, sizeof (sa_node_record_t));
1955 	req.LID = dlid;
1956 
1957 	qargs.sq_attr_id	= SA_NODERECORD_ATTRID;
1958 	qargs.sq_access_type	= IBMF_SAA_RETRIEVE;
1959 	qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
1960 	qargs.sq_template	= &req;
1961 	qargs.sq_callback	= NULL;
1962 	qargs.sq_callback_arg	= NULL;
1963 
1964 	ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
1965 	if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
1966 		IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
1967 		    " SA Retrieve Failed: %d", ret);
1968 		return (IBDM_FAILURE);
1969 	}
1970 	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx Port"
1971 	    "GUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.NodeGUID);
1972 
1973 	*node_guid = resp->NodeInfo.NodeGUID;
1974 	*port_guid = resp->NodeInfo.PortGUID;
1975 	kmem_free(resp, length);
1976 	return (IBDM_SUCCESS);
1977 }
1978 
1979 
1980 /*
1981  * ibdm_get_reachable_ports()
1982  *	Get list of the destination GID (and its path  records) by
1983  *	querying the SA access.
1984  *
1985  *	Returns Number paths
1986  */
1987 static int
1988 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca)
1989 {
1990 	uint_t			ii, jj, nrecs;
1991 	uint_t			npaths = 0;
1992 	size_t			length;
1993 	ib_gid_t		sgid;
1994 	ibdm_pkey_tbl_t		*pkey_tbl;
1995 	sa_path_record_t	*result;
1996 	sa_path_record_t	*precp;
1997 	ibdm_dp_gidinfo_t	*gid_info;
1998 
1999 	ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
2000 	IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo);
2001 
2002 	sgid.gid_prefix = portinfo->pa_sn_prefix;
2003 	sgid.gid_guid	= portinfo->pa_port_guid;
2004 
2005 	/* get reversible paths */
2006 	if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl,
2007 	    sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result)
2008 	    != IBMF_SUCCESS) {
2009 		IBTF_DPRINTF_L2("ibdm",
2010 		    "\tget_reachable_ports: Getting path records failed");
2011 		return (0);
2012 	}
2013 
2014 	for (ii = 0; ii < nrecs; ii++) {
2015 		sa_node_record_t *nrec;
2016 		size_t length;
2017 
2018 		precp = &result[ii];
2019 		if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid,
2020 		    precp->DGID.gid_prefix)) != NULL) {
2021 			IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: "
2022 			    "Already exists nrecs %d, ii %d", nrecs, ii);
2023 			ibdm_addto_glhcalist(gid_info, hca);
2024 			continue;
2025 		}
2026 		/*
2027 		 * This is a new GID. Allocate a GID structure and
2028 		 * initialize the structure
2029 		 * gl_state is initialized to IBDM_GID_PROBE_NOT_DONE (0)
2030 		 * by kmem_zalloc call
2031 		 */
2032 		gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
2033 		mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
2034 		cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL);
2035 		gid_info->gl_dgid_hi		= precp->DGID.gid_prefix;
2036 		gid_info->gl_dgid_lo		= precp->DGID.gid_guid;
2037 		gid_info->gl_sgid_hi		= precp->SGID.gid_prefix;
2038 		gid_info->gl_sgid_lo		= precp->SGID.gid_guid;
2039 		gid_info->gl_p_key		= precp->P_Key;
2040 		gid_info->gl_sa_hdl		= portinfo->pa_sa_hdl;
2041 		gid_info->gl_ibmf_hdl		= portinfo->pa_ibmf_hdl;
2042 		gid_info->gl_slid		= precp->SLID;
2043 		gid_info->gl_dlid		= precp->DLID;
2044 		gid_info->gl_transactionID	= (++ibdm.ibdm_transactionID)
2045 		    << IBDM_GID_TRANSACTIONID_SHIFT;
2046 		gid_info->gl_min_transactionID  = gid_info->gl_transactionID;
2047 		gid_info->gl_max_transactionID  = (ibdm.ibdm_transactionID +1)
2048 		    << IBDM_GID_TRANSACTIONID_SHIFT;
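		/*
		 * Illustrative note: the assignments above appear to reserve
		 * a contiguous window of transaction IDs for this GID
		 * (gl_min_transactionID through gl_max_transactionID), so
		 * that responses can be matched back to the GID that
		 * issued them.
		 */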
2049 		gid_info->gl_SL			= precp->SL;
2050 
2051 		/*
2052 		 * get the node record with this guid if the destination
2053 		 * device is a Cisco one.
2054 		 */
2055 		if (ibdm_is_cisco(precp->DGID.gid_guid) &&
2056 		    (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) &&
2057 		    ibdm_get_node_record_by_port(portinfo->pa_sa_hdl,
2058 		    precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) {
2059 			gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID;
2060 			gid_info->gl_devid = nrec->NodeInfo.DeviceID;
2061 			kmem_free(nrec, length);
2062 		}
2063 
2064 		ibdm_addto_glhcalist(gid_info,  hca);
2065 
2066 		ibdm_dump_path_info(precp);
2067 
2068 		gid_info->gl_qp_hdl = NULL;
2069 		ASSERT(portinfo->pa_pkey_tbl != NULL &&
2070 		    portinfo->pa_npkeys != 0);
2071 
2072 		for (jj = 0; jj < portinfo->pa_npkeys; jj++) {
2073 			pkey_tbl = &portinfo->pa_pkey_tbl[jj];
2074 			if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
2075 			    (pkey_tbl->pt_qp_hdl != NULL)) {
2076 				gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
2077 				break;
2078 			}
2079 		}
2080 
2081 		/*
2082 		 * QP handle for GID not initialized. No matching Pkey
2083 		 * was found!! ibdm should *not* hit this case. Flag an
2084 		 * error and drop the GID if ibdm does encounter this.
2085 		 */
2086 		if (gid_info->gl_qp_hdl == NULL) {
2087 			IBTF_DPRINTF_L2(ibdm_string,
2088 			    "\tget_reachable_ports: No matching Pkey");
2089 			ibdm_delete_gidinfo(gid_info);
2090 			continue;
2091 		}
2092 		if (ibdm.ibdm_dp_gidlist_head == NULL) {
2093 			ibdm.ibdm_dp_gidlist_head = gid_info;
2094 			ibdm.ibdm_dp_gidlist_tail = gid_info;
2095 		} else {
2096 			ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
2097 			gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
2098 			ibdm.ibdm_dp_gidlist_tail = gid_info;
2099 		}
2100 		npaths++;
2101 	}
2102 	kmem_free(result, length);
2103 	IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths);
2104 	return (npaths);
2105 }
2106 
2107 
2108 /*
2109  * ibdm_check_dgid()
2110  *	Look in the global list to check whether we know this DGID already
2111  *	Return a pointer to the matching gidinfo if known, otherwise NULL
2112  */
2113 static ibdm_dp_gidinfo_t *
2114 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix)
2115 {
2116 	ibdm_dp_gidinfo_t	*gid_list;
2117 
2118 	for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
2119 	    gid_list = gid_list->gl_next) {
2120 		if ((guid == gid_list->gl_dgid_lo) &&
2121 		    (prefix == gid_list->gl_dgid_hi)) {
2122 			break;
2123 		}
2124 	}
2125 	return (gid_list);
2126 }
2127 
2128 
2129 /*
2130  * ibdm_find_gid()
2131  *	Look in the global list to find a GID entry with matching
2132  *	port & node GUID.
2133  *	Return pointer to gidinfo if found, else return NULL
2134  */
2135 static ibdm_dp_gidinfo_t *
2136 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid)
2137 {
2138 	ibdm_dp_gidinfo_t	*gid_list;
2139 
2140 	IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n",
2141 	    nodeguid, portguid);
2142 
2143 	for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
2144 	    gid_list = gid_list->gl_next) {
2145 		if ((portguid == gid_list->gl_portguid) &&
2146 		    (nodeguid == gid_list->gl_nodeguid)) {
2147 			break;
2148 		}
2149 	}
2150 
2151 	IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n",
2152 	    gid_list);
2153 	return (gid_list);
2154 }
2155 
2156 
2157 /*
2158  * ibdm_set_classportinfo()
2159  *	ibdm_set_classportinfo() activates a Cisco FC GW by sending a
2160  *	SetClassPortInfo request with the trap LID, trap GID, etc. to the
2161  *	gateway, since the gateway doesn't provide the IO Unit Information
2162  *	otherwise. This behavior is Cisco specific, and this function is
2163  *	called only for a Cisco FC GW.
2164  *	Returns IBDM_SUCCESS/IBDM_FAILURE
2165  */
2166 static int
2167 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info)
2168 {
2169 	ibmf_msg_t		*msg;
2170 	ib_mad_hdr_t		*hdr;
2171 	ibdm_timeout_cb_args_t	*cb_args;
2172 	void			*data;
2173 	ib_mad_classportinfo_t *cpi;
2174 
2175 	IBTF_DPRINTF_L4("ibdm",
2176 	    "\tset_classportinfo: gid info 0x%p", gid_info);
2177 
2178 	/*
2179 	 * Send command to set the classportinfo attribute. Allocate an IBMF
2180 	 * packet and initialize the packet.
2181 	 */
2182 	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
2183 	    &msg) != IBMF_SUCCESS) {
2184 		IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail");
2185 		return (IBDM_FAILURE);
2186 	}
2187 
2188 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2189 	ibdm_alloc_send_buffers(msg);
2190 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2191 
2192 	msg->im_local_addr.ia_local_lid		= gid_info->gl_slid;
2193 	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
2194 	msg->im_local_addr.ia_remote_qno	= 1;
2195 	msg->im_local_addr.ia_p_key		= gid_info->gl_p_key;
2196 	msg->im_local_addr.ia_q_key		= IB_GSI_QKEY;
2197 	msg->im_local_addr.ia_service_level	= gid_info->gl_SL;
2198 
2199 	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
2200 	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
2201 	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
2202 	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
2203 	hdr->R_Method		= IB_DM_DEVMGT_METHOD_SET;
2204 	hdr->Status		= 0;
2205 	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
2206 	hdr->AttributeID	= h2b16(IB_DM_ATTR_CLASSPORTINFO);
2207 	hdr->AttributeModifier	= 0;
2208 
2209 	data = msg->im_msgbufs_send.im_bufs_cl_data;
2210 	cpi = (ib_mad_classportinfo_t *)data;
2211 
2212 	/*
2213 	 * Set the classportinfo values to activate this Cisco FC GW.
2214 	 */
2215 	cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi);
2216 	cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo);
2217 	cpi->TrapLID = h2b16(gid_info->gl_slid);
2218 	cpi->TrapSL = gid_info->gl_SL;
2219 	cpi->TrapP_Key = h2b16(gid_info->gl_p_key);
2220 	cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn));
2221 	cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *)
2222 	    gid_info->gl_qp_hdl)->isq_qkey));
2223 
2224 	cb_args = &gid_info->gl_cpi_cb_args;
2225 	cb_args->cb_gid_info = gid_info;
2226 	cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
2227 	cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO;
2228 
2229 	mutex_enter(&gid_info->gl_mutex);
2230 	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2231 	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2232 	mutex_exit(&gid_info->gl_mutex);
2233 
2234 	IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: "
2235 	    "timeout id %x", gid_info->gl_timeout_id);
2236 
2237 	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
2238 	    msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2239 		IBTF_DPRINTF_L2("ibdm",
2240 		    "\tset_classportinfo: ibmf send failed");
2241 		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
2242 	}
2243 
2244 	return (IBDM_SUCCESS);
2245 }
2246 
2247 
2248 /*
2249  * ibdm_send_classportinfo()
2250  *	Send classportinfo request. When the request is completed
2251  *	IBMF calls ibdm_classportinfo_cb routine to inform about
2252  *	the completion.
2253  *	Returns IBDM_SUCCESS/IBDM_FAILURE
2254  */
2255 static int
2256 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info)
2257 {
2258 	ibmf_msg_t		*msg;
2259 	ib_mad_hdr_t		*hdr;
2260 	ibdm_timeout_cb_args_t	*cb_args;
2261 
2262 	IBTF_DPRINTF_L4("ibdm",
2263 	    "\tsend_classportinfo: gid info 0x%p", gid_info);
2264 
2265 	/*
2266 	 * Send command to get the classportinfo attribute. Allocate an IBMF
2267 	 * packet and initialize the packet.
2268 	 */
2269 	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
2270 	    &msg) != IBMF_SUCCESS) {
2271 		IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail");
2272 		return (IBDM_FAILURE);
2273 	}
2274 
2275 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2276 	ibdm_alloc_send_buffers(msg);
2277 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2278 
2279 	msg->im_local_addr.ia_local_lid		= gid_info->gl_slid;
2280 	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
2281 	msg->im_local_addr.ia_remote_qno	= 1;
2282 	msg->im_local_addr.ia_p_key		= gid_info->gl_p_key;
2283 	msg->im_local_addr.ia_q_key		= IB_GSI_QKEY;
2284 	msg->im_local_addr.ia_service_level	= gid_info->gl_SL;
2285 
2286 	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
2287 	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
2288 	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
2289 	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
2290 	hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
2291 	hdr->Status		= 0;
2292 	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
2293 	hdr->AttributeID	= h2b16(IB_DM_ATTR_CLASSPORTINFO);
2294 	hdr->AttributeModifier	= 0;
2295 
2296 	cb_args = &gid_info->gl_cpi_cb_args;
2297 	cb_args->cb_gid_info = gid_info;
2298 	cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
2299 	cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO;
2300 
2301 	mutex_enter(&gid_info->gl_mutex);
2302 	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2303 	    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2304 	mutex_exit(&gid_info->gl_mutex);
2305 
2306 	IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: "
2307 	    "timeout id %x", gid_info->gl_timeout_id);
2308 
2309 	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
2310 	    msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2311 		IBTF_DPRINTF_L2("ibdm",
2312 		    "\tsend_classportinfo: ibmf send failed");
2313 		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
2314 	}
2315 
2316 	return (IBDM_SUCCESS);
2317 }
2318 
2319 
2320 /*
2321  * ibdm_handle_setclassportinfo()
2322  *	Invoked by the IBMF when setClassPortInfo request is completed.
2323  */
2324 static void
2325 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl,
2326     ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2327 {
2328 	void			*data;
2329 	timeout_id_t		timeout_id;
2330 	ib_mad_classportinfo_t *cpi;
2331 
2332 	IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl "
2333 	    "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2334 
2335 	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
2336 		IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: "
2337 		    "Not a ClassPortInfo resp");
2338 		*flag |= IBDM_IBMF_PKT_UNEXP_RESP;
2339 		return;
2340 	}
2341 
2342 	/*
2343 	 * Verify whether timeout handler is created/active.
2344 	 * If created/ active,  cancel the timeout  handler
2345 	 */
2346 	mutex_enter(&gid_info->gl_mutex);
2347 	if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) {
2348 		IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp");
2349 		*flag |= IBDM_IBMF_PKT_DUP_RESP;
2350 		mutex_exit(&gid_info->gl_mutex);
2351 		return;
2352 	}
2353 	ibdm_bump_transactionID(gid_info);
2354 
2355 	gid_info->gl_iou_cb_args.cb_req_type = 0;
2356 	if (gid_info->gl_timeout_id) {
2357 		timeout_id = gid_info->gl_timeout_id;
2358 		mutex_exit(&gid_info->gl_mutex);
2359 		IBTF_DPRINTF_L5("ibdm", "handle_setclassportinfo: "
2360 		    "gl_timeout_id = 0x%x", timeout_id);
2361 		if (untimeout(timeout_id) == -1) {
2362 			IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: "
2363 			    "untimeout gl_timeout_id failed");
2364 		}
2365 		mutex_enter(&gid_info->gl_mutex);
2366 		gid_info->gl_timeout_id = 0;
2367 	}
2368 	mutex_exit(&gid_info->gl_mutex);
2369 
2370 	data = msg->im_msgbufs_recv.im_bufs_cl_data;
2371 	cpi = (ib_mad_classportinfo_t *)data;
2372 
2373 	ibdm_dump_classportinfo(cpi);
2374 }
2375 
2376 
2377 /*
2378  * ibdm_handle_classportinfo()
2379  *	Invoked by the IBMF when the classportinfo request is completed.
2380  */
2381 static void
2382 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl,
2383     ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2384 {
2385 	void			*data;
2386 	timeout_id_t		timeout_id;
2387 	ib_mad_hdr_t		*hdr;
2388 	ib_mad_classportinfo_t *cpi;
2389 
2390 	IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl "
2391 	    "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2392 
2393 	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
2394 		IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: "
2395 		    "Not a ClassPortInfo resp");
2396 		*flag |= IBDM_IBMF_PKT_UNEXP_RESP;
2397 		return;
2398 	}
2399 
2400 	/*
2401 	 * Verify whether timeout handler is created/active.
2402 	 * If created/ active,  cancel the timeout  handler
2403 	 */
2404 	mutex_enter(&gid_info->gl_mutex);
2405 	ibdm_bump_transactionID(gid_info);
2406 	if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) {
2407 		IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp");
2408 		*flag |= IBDM_IBMF_PKT_DUP_RESP;
2409 		mutex_exit(&gid_info->gl_mutex);
2410 		return;
2411 	}
2412 	gid_info->gl_iou_cb_args.cb_req_type = 0;
2413 	if (gid_info->gl_timeout_id) {
2414 		timeout_id = gid_info->gl_timeout_id;
2415 		mutex_exit(&gid_info->gl_mutex);
2416 		IBTF_DPRINTF_L5("ibdm", "handle_classportinfo: "
2417 		    "gl_timeout_id = 0x%x", timeout_id);
2418 		if (untimeout(timeout_id) == -1) {
2419 			IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: "
2420 			    "untimeout gl_timeout_id failed");
2421 		}
2422 		mutex_enter(&gid_info->gl_mutex);
2423 		gid_info->gl_timeout_id = 0;
2424 	}
2425 	gid_info->gl_state = IBDM_GET_IOUNITINFO;
2426 	gid_info->gl_pending_cmds++;
2427 	mutex_exit(&gid_info->gl_mutex);
2428 
2429 	data = msg->im_msgbufs_recv.im_bufs_cl_data;
2430 	cpi = (ib_mad_classportinfo_t *)data;
2431 
2432 	/*
2433 	 * Cache the "RespTimeValue" and redirection information in the
2434 	 * global gid list data structure. This cached information will
2435 	 * be used to send any further requests to the GID.
2436 	 */
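	/*
	 * Illustrative note: the 0x1F mask below keeps only the 5-bit
	 * RespTimeValue field of the ClassPortInfo, and the 0xffffff mask
	 * further down keeps the 24-bit RedirectQP field.
	 */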
2437 	gid_info->gl_resp_timeout	=
2438 	    (b2h32(cpi->RespTimeValue) & 0x1F);
2439 
2440 	gid_info->gl_redirected		= ((IBDM_IN_IBMFMSG_STATUS(msg) &
2441 	    MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE);
2442 	gid_info->gl_redirect_dlid	= b2h16(cpi->RedirectLID);
2443 	gid_info->gl_redirect_QP	= (b2h32(cpi->RedirectQP) & 0xffffff);
2444 	gid_info->gl_redirect_pkey	= b2h16(cpi->RedirectP_Key);
2445 	gid_info->gl_redirect_qkey	= b2h32(cpi->RedirectQ_Key);
2446 	gid_info->gl_redirectGID_hi	= b2h64(cpi->RedirectGID_hi);
2447 	gid_info->gl_redirectGID_lo	= b2h64(cpi->RedirectGID_lo);
2448 	gid_info->gl_redirectSL		= cpi->RedirectSL;
2449 
2450 	ibdm_dump_classportinfo(cpi);
2451 
2452 	/*
2453 	 * Send IOUnitInfo request
2454 	 * Reuse the IBMF packet previously allocated for the ClassPortInfo request.
2455 	 * Check whether DM agent on the remote node requested redirection
2456 	 * If so, send the request to the redirect DGID/DLID/PKEY/QP.
2457 	 */
2458 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2459 	ibdm_alloc_send_buffers(msg);
2460 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2461 	msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
2462 	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
2463 
2464 	if (gid_info->gl_redirected == B_TRUE) {
2465 		if (gid_info->gl_redirect_dlid != 0) {
2466 			msg->im_local_addr.ia_remote_lid =
2467 			    gid_info->gl_redirect_dlid;
2468 		}
2469 		msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
2470 		msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
2471 		msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
2472 		msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
2473 	} else {
2474 		msg->im_local_addr.ia_remote_qno = 1;
2475 		msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2476 		msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2477 		msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2478 	}
2479 
2480 	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
2481 	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
2482 	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
2483 	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
2484 	hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
2485 	hdr->Status		= 0;
2486 	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
2487 	hdr->AttributeID	= h2b16(IB_DM_ATTR_IO_UNITINFO);
2488 	hdr->AttributeModifier	= 0;
2489 
2490 	gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
2491 	gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
2492 	gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
2493 
2494 	mutex_enter(&gid_info->gl_mutex);
2495 	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2496 	    &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2497 	mutex_exit(&gid_info->gl_mutex);
2498 
2499 	IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:"
2500 	    "timeout %x", gid_info->gl_timeout_id);
2501 
2502 	if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL,
2503 	    ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) {
2504 		IBTF_DPRINTF_L2("ibdm",
2505 		    "\thandle_classportinfo: msg transport failed");
2506 		ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args);
2507 	}
2508 	(*flag) |= IBDM_IBMF_PKT_REUSED;
2509 }
2510 
2511 
2512 /*
2513  * ibdm_send_iounitinfo:
2514  *	Sends a DM request to get IOU unitinfo.
2515  */
2516 static int
2517 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info)
2518 {
2519 	ibmf_msg_t	*msg;
2520 	ib_mad_hdr_t	*hdr;
2521 
2522 	IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info);
2523 
2524 	/*
2525 	 * Send command to get the iounitinfo attribute. Allocate an IBMF
2526 	 * packet and initialize the packet.
2527 	 */
2528 	if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) !=
2529 	    IBMF_SUCCESS) {
2530 		IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail");
2531 		return (IBDM_FAILURE);
2532 	}
2533 
2534 	mutex_enter(&gid_info->gl_mutex);
2535 	ibdm_bump_transactionID(gid_info);
2536 	mutex_exit(&gid_info->gl_mutex);
2537 
2538 
2539 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2540 	ibdm_alloc_send_buffers(msg);
2541 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2542 	msg->im_local_addr.ia_local_lid		= gid_info->gl_slid;
2543 	msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
2544 	msg->im_local_addr.ia_remote_qno	= 1;
2545 	msg->im_local_addr.ia_p_key		= gid_info->gl_p_key;
2546 	msg->im_local_addr.ia_q_key		= IB_GSI_QKEY;
2547 	msg->im_local_addr.ia_service_level	= gid_info->gl_SL;
2548 
2549 	hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
2550 	hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
2551 	hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
2552 	hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
2553 	hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
2554 	hdr->Status		= 0;
2555 	hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
2556 	hdr->AttributeID	= h2b16(IB_DM_ATTR_IO_UNITINFO);
2557 	hdr->AttributeModifier	= 0;
2558 
2559 	gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
2560 	gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
2561 	gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
2562 
2563 	mutex_enter(&gid_info->gl_mutex);
2564 	gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2565 	    &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2566 	mutex_exit(&gid_info->gl_mutex);
2567 
2568 	IBTF_DPRINTF_L5("ibdm", "send_iounitinfo:"
2569 	    "timeout %x", gid_info->gl_timeout_id);
2570 
2571 	if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
2572 	    NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) !=
2573 	    IBMF_SUCCESS) {
2574 		IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed");
2575 		ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl,
2576 		    msg, &gid_info->gl_iou_cb_args);
2577 	}
2578 	return (IBDM_SUCCESS);
2579 }
2580 
2581 /*
2582  * ibdm_handle_iounitinfo()
2583  *	Invoked by the IBMF when IO Unitinfo request is completed.
2584  */
2585 static void
2586 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl,
2587     ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2588 {
2589 	int			ii, first = B_TRUE;
2590 	int			num_iocs;
2591 	size_t			size;
2592 	uchar_t			slot_info;
2593 	timeout_id_t		timeout_id;
2594 	ib_mad_hdr_t		*hdr;
2595 	ibdm_ioc_info_t		*ioc_info;
2596 	ib_dm_io_unitinfo_t	*iou_info;
2597 	ib_dm_io_unitinfo_t	*giou_info;
2598 	ibdm_timeout_cb_args_t	*cb_args;
2599 
2600 	IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo:"
2601 	    " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info);
2602 
2603 	if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) {
2604 		IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2605 		    "Unexpected response");
2606 		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2607 		return;
2608 	}
2609 
2610 	mutex_enter(&gid_info->gl_mutex);
2611 	if (gid_info->gl_state != IBDM_GET_IOUNITINFO) {
2612 		IBTF_DPRINTF_L4("ibdm",
2613 		    "\thandle_iounitinfo: DUP resp");
2614 		mutex_exit(&gid_info->gl_mutex);
2615 		(*flag) = IBDM_IBMF_PKT_DUP_RESP;
2616 		return;
2617 	}
2618 	gid_info->gl_iou_cb_args.cb_req_type = 0;
2619 	if (gid_info->gl_timeout_id) {
2620 		timeout_id = gid_info->gl_timeout_id;
2621 		mutex_exit(&gid_info->gl_mutex);
2622 		IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: "
2623 		    "gl_timeout_id = 0x%x", timeout_id);
2624 		if (untimeout(timeout_id) == -1) {
2625 			IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: "
2626 			    "untimeout gl_timeout_id failed");
2627 		}
2628 		mutex_enter(&gid_info->gl_mutex);
2629 		gid_info->gl_timeout_id = 0;
2630 	}
2631 	gid_info->gl_state = IBDM_GET_IOC_DETAILS;
2632 
2633 	iou_info = IBDM_IN_IBMFMSG2IOU(msg);
2634 	ibdm_dump_iounitinfo(iou_info);
2635 	num_iocs = iou_info->iou_num_ctrl_slots;
2636 	/*
2637 	 * Check whether the number of IOCs reported is zero; if so, return.
2638 	 * When num_iocs is reported as zero, the internal IOC database
2639 	 * still needs to be updated. To ensure that, save the number of
2640 	 * IOCs in the field "gl_num_iocs". A separate field is used instead
2641 	 * of "giou_info->iou_num_ctrl_slots" as that prevents an
2642 	 * unnecessary kmem_alloc/kmem_free when num_iocs is 0.
2643 	 */
2644 	if (num_iocs == 0 && gid_info->gl_num_iocs == 0) {
2645 		IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's");
2646 		mutex_exit(&gid_info->gl_mutex);
2647 		return;
2648 	}
2649 	IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs);
2650 
2651 	/*
2652 	 * if there is an existing gl_iou (IOU has been probed before)
2653 	 * check if the "iou_changeid" is same as saved entry in
2654 	 * "giou_info->iou_changeid".
2655 	 * (note: this logic can prevent IOC enumeration if a given
2656 	 * vendor doesn't support setting iou_changeid field for its IOU)
2657 	 *
2658 	 * if there is an existing gl_iou and iou_changeid has changed :
2659 	 * free up existing gl_iou info and its related structures.
2660 	 * reallocate gl_iou info all over again.
2661 	 * if we donot free this up; then this leads to memory leaks
2662 	 */
2663 	if (gid_info->gl_iou) {
2664 		giou_info = &gid_info->gl_iou->iou_info;
2665 		if (b2h16(iou_info->iou_changeid) ==
2666 		    giou_info->iou_changeid) {
2667 			IBTF_DPRINTF_L3("ibdm",
2668 			    "\thandle_iounitinfo: no IOCs changed");
2669 			gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
2670 			mutex_exit(&gid_info->gl_mutex);
2671 			return;
2672 		}
2673 
2674 		/*
2675 		 * Store the iou info as prev_iou to be used after
2676 		 * sweep is done.
2677 		 */
2678 		ASSERT(gid_info->gl_prev_iou == NULL);
2679 		IBTF_DPRINTF_L4(ibdm_string,
2680 		    "\thandle_iounitinfo: setting gl_prev_iou %p",
2681 		    gid_info->gl_prev_iou);
2682 		gid_info->gl_prev_iou = gid_info->gl_iou;
2683 		ibdm.ibdm_prev_iou = 1;
2684 		gid_info->gl_iou = NULL;
2685 	}
2686 
2687 	size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t);
2688 	gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP);
2689 	giou_info = &gid_info->gl_iou->iou_info;
2690 	gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *)
2691 	    ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t));
2692 
2693 	giou_info->iou_num_ctrl_slots	= gid_info->gl_num_iocs	= num_iocs;
2694 	giou_info->iou_flag		= iou_info->iou_flag;
2695 	bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128);
2696 	giou_info->iou_changeid	= b2h16(iou_info->iou_changeid);
2697 	gid_info->gl_pending_cmds++; /* for diag code */
2698 	mutex_exit(&gid_info->gl_mutex);
2699 
2700 	if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) {
2701 		mutex_enter(&gid_info->gl_mutex);
2702 		gid_info->gl_pending_cmds--;
2703 		mutex_exit(&gid_info->gl_mutex);
2704 	}
2705 	/*
2706 	 * Parallelize getting IOC controller profiles from here.
2707 	 * Allocate IBMF packets and send commands to get IOC profile for
2708 	 * each IOC present on the IOU.
2709 	 */
2710 	for (ii = 0; ii < num_iocs; ii++) {
2711 		/*
2712 		 * Check whether IOC is present in the slot
2713 		 * Series of nibbles (in the field iou_ctrl_list) represents
2714 		 * a slot in the IOU.
2715 		 * Byte format: 76543210
2716 		 * Bits 0-3 of first byte represent Slot 2
2717 		 * bits 4-7 of first byte represent slot 1,
2718 		 * bits 0-3 of second byte represent slot 4 and so on
2719 		 * Each 4-bit nibble has the following meaning
2720 		 * 0x0 : IOC not installed
2721 		 * 0x1 : IOC is present
2722 		 * 0xf : Slot does not exist
2723 		 * and all other values are reserved.
2724 		 */
2725 		ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
2726 		slot_info = giou_info->iou_ctrl_list[(ii/2)];
2727 		if ((ii % 2) == 0)
2728 			slot_info = (slot_info >> 4);
2729 
2730 		if ((slot_info & 0xf) != 1) {
2731 			IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2732 			    "No IOC is present in the slot = %d", ii);
2733 			ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
2734 			continue;
2735 		}
2736 
2737 		mutex_enter(&gid_info->gl_mutex);
2738 		ibdm_bump_transactionID(gid_info);
2739 		mutex_exit(&gid_info->gl_mutex);
2740 
2741 		/*
2742 		 * Reuse the already allocated packet (for IOUnitInfo) to
2743 		 * send the first IOC controller attribute. Allocate new
2744 		 * IBMF packets for the rest of the IOCs.
2745 		 */
2746 		if (first != B_TRUE) {
2747 			msg = NULL;
2748 			if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
2749 			    &msg) != IBMF_SUCCESS) {
2750 				IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2751 				    "IBMF packet allocation failed");
2752 				continue;
2753 			}
2754 
2755 		}
2756 
2757 		/* allocate send buffers for all messages */
2758 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2759 		ibdm_alloc_send_buffers(msg);
2760 		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2761 
2762 		msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
2763 		msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
2764 		if (gid_info->gl_redirected == B_TRUE) {
2765 			if (gid_info->gl_redirect_dlid != 0) {
2766 				msg->im_local_addr.ia_remote_lid =
2767 				    gid_info->gl_redirect_dlid;
2768 			}
2769 			msg->im_local_addr.ia_remote_qno =
2770 			    gid_info->gl_redirect_QP;
2771 			msg->im_local_addr.ia_p_key =
2772 			    gid_info->gl_redirect_pkey;
2773 			msg->im_local_addr.ia_q_key =
2774 			    gid_info->gl_redirect_qkey;
2775 			msg->im_local_addr.ia_service_level =
2776 			    gid_info->gl_redirectSL;
2777 		} else {
2778 			msg->im_local_addr.ia_remote_qno = 1;
2779 			msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2780 			msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2781 			msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2782 		}
2783 
2784 		hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
2785 		hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
2786 		hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
2787 		hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
2788 		hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
2789 		hdr->Status		= 0;
2790 		hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
2791 		hdr->AttributeID	= h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
2792 		hdr->AttributeModifier 	= h2b32(ii + 1);
2793 
2794 		ioc_info->ioc_state	= IBDM_IOC_STATE_PROBE_INVALID;
2795 		cb_args			= &ioc_info->ioc_cb_args;
2796 		cb_args->cb_gid_info	= gid_info;
2797 		cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
2798 		cb_args->cb_req_type	= IBDM_REQ_TYPE_IOCINFO;
2799 		cb_args->cb_ioc_num	= ii;
2800 
2801 		mutex_enter(&gid_info->gl_mutex);
2802 		gid_info->gl_pending_cmds++; /* for diag code */
2803 
2804 		ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2805 		    cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2806 		mutex_exit(&gid_info->gl_mutex);
2807 
2808 		IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:"
2809 		    "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii);
2810 
2811 		if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
2812 		    NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2813 			IBTF_DPRINTF_L2("ibdm",
2814 			    "\thandle_iounitinfo: msg transport failed");
2815 			ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
2816 		}
2817 		(*flag) |= IBDM_IBMF_PKT_REUSED;
2818 		first = B_FALSE;
2819 		gid_info->gl_iou->iou_niocs_probe_in_progress++;
2820 	}
2821 }
2822 
2823 
2824 /*
2825  * ibdm_handle_ioc_profile()
2826  *	Invoked by the IBMF when the IOCControllerProfile request
2827  *	gets completed
2828  */
2829 static void
2830 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl,
2831     ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2832 {
2833 	int				first = B_TRUE, reprobe = 0;
2834 	uint_t				ii, ioc_no, srv_start;
2835 	uint_t				nserv_entries;
2836 	timeout_id_t			timeout_id;
2837 	ib_mad_hdr_t			*hdr;
2838 	ibdm_ioc_info_t			*ioc_info;
2839 	ibdm_timeout_cb_args_t		*cb_args;
2840 	ib_dm_ioc_ctrl_profile_t	*ioc, *gioc;
2841 
2842 	IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
2843 	    " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2844 
2845 	ioc = IBDM_IN_IBMFMSG2IOC(msg);
2846 	/*
2847 	 * Check whether we know this IOC already
2848 	 * This will return NULL if reprobe is in progress
2849 	 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set.
2850 	 * Do not hold mutexes here.
2851 	 */
2852 	if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) {
2853 		IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
2854 		    "IOC guid %llx is present", ioc->ioc_guid);
2855 		return;
2856 	}
2857 	ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg);
2858 	IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1);
2859 
2860 	/* Make sure that IOC index is with the valid range */
2861 	if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
2862 		IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: "
2863 		    "IOC index Out of range, index %d", ioc);
2864 		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2865 		return;
2866 	}
2867 	ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1];
2868 	ioc_info->ioc_iou_info = gid_info->gl_iou;
2869 
2870 	mutex_enter(&gid_info->gl_mutex);
2871 	if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) {
2872 		reprobe = 1;
2873 		ioc_info->ioc_prev_serv = ioc_info->ioc_serv;
2874 		ioc_info->ioc_serv = NULL;
2875 		ioc_info->ioc_prev_serv_cnt =
2876 		    ioc_info->ioc_profile.ioc_service_entries;
2877 	} else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) {
2878 		IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response"
2879 		    "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state);
2880 		mutex_exit(&gid_info->gl_mutex);
2881 		(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
2882 		return;
2883 	}
2884 	ioc_info->ioc_cb_args.cb_req_type = 0;
2885 	if (ioc_info->ioc_timeout_id) {
2886 		timeout_id = ioc_info->ioc_timeout_id;
2887 		ioc_info->ioc_timeout_id = 0;
2888 		mutex_exit(&gid_info->gl_mutex);
2889 		IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: "
2890 		    "ioc_timeout_id = 0x%x", timeout_id);
2891 		if (untimeout(timeout_id) == -1) {
2892 			IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: "
2893 			    "untimeout ioc_timeout_id failed");
2894 		}
2895 		mutex_enter(&gid_info->gl_mutex);
2896 	}
2897 
2898 	ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS;
2899 	if (reprobe == 0) {
2900 		ioc_info->ioc_iou_guid = gid_info->gl_nodeguid;
2901 		ioc_info->ioc_nodeguid = gid_info->gl_nodeguid;
2902 	}
2903 
2904 	/*
2905 	 * Save all the IOC information in the global structures.
2906 	 * Note the wire format is big endian and the SPARC processor is
2907 	 * also big endian, so there is no need to convert the data fields.
2908 	 * The conversion routines used below are no-ops on SPARC machines,
2909 	 * whereas they perform byte swapping on little endian machines
2910 	 * such as Intel processors.
2911 	 */
2912 	gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile;
2913 
2914 	/*
2915 	 * Restrict updates to only port GIDs and service entries during reprobe
2916 	 */
2917 	if (reprobe == 0) {
2918 		gioc->ioc_guid			= b2h64(ioc->ioc_guid);
2919 		gioc->ioc_vendorid		=
2920 		    ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK)
2921 		    >> IB_DM_VENDORID_SHIFT);
2922 		gioc->ioc_deviceid		= b2h32(ioc->ioc_deviceid);
2923 		gioc->ioc_device_ver		= b2h16(ioc->ioc_device_ver);
2924 		gioc->ioc_subsys_vendorid	=
2925 		    ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK)
2926 		    >> IB_DM_VENDORID_SHIFT);
2927 		gioc->ioc_subsys_id		= b2h32(ioc->ioc_subsys_id);
2928 		gioc->ioc_io_class		= b2h16(ioc->ioc_io_class);
2929 		gioc->ioc_io_subclass		= b2h16(ioc->ioc_io_subclass);
2930 		gioc->ioc_protocol		= b2h16(ioc->ioc_protocol);
2931 		gioc->ioc_protocol_ver		= b2h16(ioc->ioc_protocol_ver);
2932 		gioc->ioc_send_msg_qdepth	=
2933 		    b2h16(ioc->ioc_send_msg_qdepth);
2934 		gioc->ioc_rdma_read_qdepth	=
2935 		    b2h16(ioc->ioc_rdma_read_qdepth);
2936 		gioc->ioc_send_msg_sz		= b2h32(ioc->ioc_send_msg_sz);
2937 		gioc->ioc_rdma_xfer_sz		= b2h32(ioc->ioc_rdma_xfer_sz);
2938 		gioc->ioc_ctrl_opcap_mask	= ioc->ioc_ctrl_opcap_mask;
2939 		bcopy(ioc->ioc_id_string, gioc->ioc_id_string,
2940 		    IB_DM_IOC_ID_STRING_LEN);
2941 
2942 		ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode;
2943 		ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid;
2944 		ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK &
2945 		    gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE;
2946 
2947 		if (ioc_info->ioc_diagdeviceid == B_TRUE) {
2948 			gid_info->gl_pending_cmds++;
2949 			IBTF_DPRINTF_L3(ibdm_string,
2950 			    "\tibdm_handle_ioc_profile: "
2951 			    "%d: gid_info %p gl_state %d pending_cmds %d",
2952 			    __LINE__, gid_info, gid_info->gl_state,
2953 			    gid_info->gl_pending_cmds);
2954 		}
2955 	}
2956 	gioc->ioc_service_entries	= ioc->ioc_service_entries;
2957 	mutex_exit(&gid_info->gl_mutex);
2958 
2959 	ibdm_dump_ioc_profile(gioc);
2960 
2961 	if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) {
2962 		if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) {
2963 			mutex_enter(&gid_info->gl_mutex);
2964 			gid_info->gl_pending_cmds--;
2965 			mutex_exit(&gid_info->gl_mutex);
2966 		}
2967 	}
2968 	ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc(
2969 	    (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)),
2970 	    KM_SLEEP);
2971 
2972 	/*
2973 	 * The maximum number of service entries that can be obtained in a
2974 	 * single request is 4. If there are more than four service entries,
2975 	 * calculate the number of requests needed and send them in parallel.
2976 	 */
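	/*
	 * Worked example (illustrative): an IOC reporting 10 service
	 * entries results in three requests from the loop below, covering
	 * entries 0-3, 4-7 and 8-9 (srv_start = 0, 4 and 8 respectively).
	 */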
2977 	nserv_entries = ioc->ioc_service_entries;
2978 	ii = 0;
2979 	while (nserv_entries) {
2980 		mutex_enter(&gid_info->gl_mutex);
2981 		gid_info->gl_pending_cmds++;
2982 		ibdm_bump_transactionID(gid_info);
2983 		mutex_exit(&gid_info->gl_mutex);
2984 
2985 		if (first != B_TRUE) {
2986 			if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
2987 			    &msg) != IBMF_SUCCESS) {
2988 				continue;
2989 			}
2990 
2991 		}
2992 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2993 		ibdm_alloc_send_buffers(msg);
2994 		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2995 		msg->im_local_addr.ia_local_lid	= gid_info->gl_slid;
2996 		msg->im_local_addr.ia_remote_lid	= gid_info->gl_dlid;
2997 		if (gid_info->gl_redirected == B_TRUE) {
2998 			if (gid_info->gl_redirect_dlid != 0) {
2999 				msg->im_local_addr.ia_remote_lid =
3000 				    gid_info->gl_redirect_dlid;
3001 			}
3002 			msg->im_local_addr.ia_remote_qno =
3003 			    gid_info->gl_redirect_QP;
3004 			msg->im_local_addr.ia_p_key =
3005 			    gid_info->gl_redirect_pkey;
3006 			msg->im_local_addr.ia_q_key =
3007 			    gid_info->gl_redirect_qkey;
3008 			msg->im_local_addr.ia_service_level =
3009 			    gid_info->gl_redirectSL;
3010 		} else {
3011 			msg->im_local_addr.ia_remote_qno = 1;
3012 			msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
3013 			msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
3014 			msg->im_local_addr.ia_service_level = gid_info->gl_SL;
3015 		}
3016 
3017 		hdr			= IBDM_OUT_IBMFMSG_MADHDR(msg);
3018 		hdr->BaseVersion	= MAD_CLASS_BASE_VERS_1;
3019 		hdr->MgmtClass		= MAD_MGMT_CLASS_DEV_MGT;
3020 		hdr->ClassVersion	= IB_DM_CLASS_VERSION_1;
3021 		hdr->R_Method		= IB_DM_DEVMGT_METHOD_GET;
3022 		hdr->Status		= 0;
3023 		hdr->TransactionID	= h2b64(gid_info->gl_transactionID);
3024 		hdr->AttributeID	= h2b16(IB_DM_ATTR_SERVICE_ENTRIES);
3025 
3026 		srv_start = ii * 4;
3027 		cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args;
3028 		cb_args->cb_gid_info	= gid_info;
3029 		cb_args->cb_retry_count	= ibdm_dft_retry_cnt;
3030 		cb_args->cb_req_type	= IBDM_REQ_TYPE_SRVENTS;
3031 		cb_args->cb_srvents_start = srv_start;
3032 		cb_args->cb_ioc_num	= ioc_no - 1;
3033 
3034 		if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) {
3035 			nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ;
3036 			cb_args->cb_srvents_end = (cb_args->cb_srvents_start +
3037 			    IBDM_MAX_SERV_ENTRIES_PER_REQ - 1);
3038 		} else {
3039 			cb_args->cb_srvents_end =
3040 			    (cb_args->cb_srvents_start + nserv_entries - 1);
3041 			nserv_entries = 0;
3042 		}
3043 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr))
3044 		ibdm_fill_srv_attr_mod(hdr, cb_args);
3045 		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr))
3046 
3047 		mutex_enter(&gid_info->gl_mutex);
3048 		ioc_info->ioc_serv[srv_start].se_timeout_id = timeout(
3049 		    ibdm_pkt_timeout_hdlr, cb_args,
3050 		    IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
3051 		mutex_exit(&gid_info->gl_mutex);
3052 
3053 		IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:"
3054 		    "timeout %x, ioc %d srv %d",
3055 		    ioc_info->ioc_serv[srv_start].se_timeout_id,
3056 		    ioc_no - 1, srv_start);
3057 
3058 		if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
3059 		    NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
3060 			IBTF_DPRINTF_L2("ibdm",
3061 			    "\thandle_ioc_profile: msg send failed");
3062 			ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
3063 		}
3064 		(*flag) |= IBDM_IBMF_PKT_REUSED;
3065 		first = B_FALSE;
3066 		ii++;
3067 	}
3068 }
3069 
3070 
3071 /*
3072  * ibdm_handle_srventry_mad()
3073  */
3074 static void
3075 ibdm_handle_srventry_mad(ibmf_msg_t *msg,
3076     ibdm_dp_gidinfo_t *gid_info, int *flag)
3077 {
3078 	uint_t			ii, ioc_no, attrmod;
3079 	uint_t			nentries, start, end;
3080 	timeout_id_t		timeout_id;
3081 	ib_dm_srv_t		*srv_ents;
3082 	ibdm_ioc_info_t		*ioc_info;
3083 	ibdm_srvents_info_t	*gsrv_ents;
3084 
3085 	IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:"
3086 	    " IBMF msg %p gid info %p", msg, gid_info);
3087 
3088 	srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg);
3089 	/*
3090 	 * Get the start and end index of the service entries
3091 	 * Upper 16 bits identify the IOC
3092 	 * Lower 16 bits specify the range of service entries
3093 	 * 	LSB specifies (Big endian) end of the range
3094 	 * 	MSB specifies (Big endian) start of the range
3095 	 */
3096 	attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg);
3097 	ioc_no	= ((attrmod >> 16) & IBDM_16_BIT_MASK);
3098 	end	= ((attrmod >> 8) & IBDM_8_BIT_MASK);
3099 	start	= (attrmod & IBDM_8_BIT_MASK);
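	/*
	 * Worked example (illustrative): an attribute modifier of
	 * 0x00010300 decodes, per the extraction above, to ioc_no = 1,
	 * end = 3 and start = 0.
	 */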
3100 
3101 	/* Make sure that IOC index is with the valid range */
3102 	if ((ioc_no < 1) ||
3103 	    (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) {
3104 		IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3105 		    "IOC index Out of range, index %d", ioc_no);
3106 		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3107 		return;
3108 	}
3109 	ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1));
3110 
3111 	/*
3112 	 * Make sure that the "start" and "end" service indexes are
3113 	 * with in the valid range
3114 	 */
3115 	nentries = ioc_info->ioc_profile.ioc_service_entries;
3116 	if ((start > end) || (start >= nentries) || (end >= nentries)) {
3117 		IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3118 		    "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries);
3119 		(*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3120 		return;
3121 	}
3122 	gsrv_ents = &ioc_info->ioc_serv[start];
3123 	mutex_enter(&gid_info->gl_mutex);
3124 	if (gsrv_ents->se_state != IBDM_SE_INVALID) {
3125 		IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3126 		    "already known, ioc %d, srv %d, se_state %x",
3127 		    ioc_no - 1, start, gsrv_ents->se_state);
3128 		mutex_exit(&gid_info->gl_mutex);
3129 		(*flag) |= IBDM_IBMF_PKT_DUP_RESP;
3130 		return