/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Should we maintain base lid for each port in ibmf_ci?
 */

/*
 * This file implements the UD destination resource management in IBMF.
 */

#include <sys/ib/mgt/ibmf/ibmf_impl.h>

extern int ibmf_trace_level;
extern ibmf_state_t *ibmf_statep;
static void ibmf_i_populate_ud_dest_list(ibmf_ci_t *cip, int kmflag);

/*
 * ibmf_i_init_ud_dest():
 * Initialize a cache of UD destination structures used to send UD traffic.
 * Also create a list of pre-allocated UD destination structures to
 * satisfy requests for a UD destination structure and its associated
 * address handle, from a thread in interrupt context. Threads in interrupt
 * context are not allowed to allocate their own address handles.
 */
void
ibmf_i_init_ud_dest(ibmf_ci_t *cip)
{
	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_ud_dest() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	/* initialize the UD dest list mutex */
	mutex_init(&cip->ci_ud_dest_list_mutex, NULL, MUTEX_DRIVER, NULL);

	/* populate the UD dest list if possible */
	ibmf_i_pop_ud_dest_thread(cip);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_ud_dest() exit\n");
}

/*
 * ibmf_i_fini_ud_dest():
 * Free up the UD destination cache and the linked list.
 */
void
ibmf_i_fini_ud_dest(ibmf_ci_t *cip)
{
	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_ud_dest() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	/* clean up the UD dest list */
	ibmf_i_clean_ud_dest_list(cip, B_TRUE);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_ud_dest() exit\n");
}

/*
 * ibmf_i_get_ud_dest():
 *	Get a UD destination structure from the list
 */
ibmf_ud_dest_t *
ibmf_i_get_ud_dest(ibmf_ci_t *cip)
{
	ibmf_ud_dest_t		*ibmf_ud_dest;

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_get_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_get_ud_dest() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	mutex_enter(&cip->ci_ud_dest_list_mutex);
	ibmf_ud_dest = cip->ci_ud_dest_list_head;
	if (ibmf_ud_dest != NULL) {
		cip->ci_ud_dest_list_head = ibmf_ud_dest->ud_next;
		cip->ci_ud_dest_list_count--;
	}
	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_get_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_get_ud_dest() exit\n");
	return (ibmf_ud_dest);
}

/*
 * ibmf_i_put_ud_dest():
 *	Add a UD destination structure to the list
 */
void
ibmf_i_put_ud_dest(ibmf_ci_t *cip, ibmf_ud_dest_t *ud_dest)
{
	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_put_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_put_ud_dest() enter, cip = %p, "
	    "ud_dest = %p\n", tnf_opaque, cip, cip,
	    tnf_opaque, ud_dest, ud_dest);

	mutex_enter(&cip->ci_ud_dest_list_mutex);
	cip->ci_ud_dest_list_count++;
	ud_dest->ud_next = cip->ci_ud_dest_list_head;
	cip->ci_ud_dest_list_head = ud_dest;
	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_put_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_put_ud_dest() exit, cip = %p\n",
	    tnf_opaque, cip, cip);
}
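
/*
 * Illustrative sketch only (not part of the driver): the expected
 * pairing of ibmf_i_get_ud_dest() and ibmf_i_put_ud_dest() for a
 * caller that cannot block, such as a thread in interrupt context.
 * The function name below is hypothetical.
 *
 *	static int
 *	example_send_from_intr(ibmf_ci_t *cip)
 *	{
 *		ibmf_ud_dest_t	*ud_dest;
 *
 *		ud_dest = ibmf_i_get_ud_dest(cip);
 *		if (ud_dest == NULL)
 *			return (IBMF_NO_RESOURCES);
 *
 *		(use ud_dest->ud_dest and its pre-allocated address
 *		handle for the send)
 *
 *		ibmf_i_put_ud_dest(cip, ud_dest);
 *		return (IBMF_SUCCESS);
 *	}
 */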

/*
 * ibmf_i_populate_ud_dest_list():
 * Maintain a list of IBMF UD destination structures to
 * satisfy requests for a UD destination structure and its associated
 * address handle, from a thread in interrupt context. Threads in interrupt
 * context are not allowed to allocate their own address handles.
 * Add to this list only if the number of entries in the list falls below
 * IBMF_UD_DEST_LO_WATER_MARK. When adding to the list, add entries up to
 * IBMF_UD_DEST_HI_WATER_MARK.
 */
static void
ibmf_i_populate_ud_dest_list(ibmf_ci_t *cip, int kmflag)
{
	ibmf_ud_dest_t		*ibmf_ud_dest;
	uint32_t		count;
	ibt_status_t		status;
	ibt_ud_dest_flags_t	ud_dest_flags = IBT_UD_DEST_NO_FLAGS;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_populate_ud_dest_list_start, IBMF_TNF_TRACE, "",
	    "ibmf_i_populate_ud_dest_list() enter, cip = %p, kmflag = %d \n",
	    tnf_opaque, cip, cip, tnf_int, kmflag, kmflag);

	/* do not allow a population operation if non-blocking */
	if (kmflag == KM_NOSLEEP) {
		IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L3,
		    ibmf_i_populate_ud_dest, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list(): %s\n", tnf_string, msg,
		    "Skipping, called with non-blocking flag\n");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_populate_ud_dest_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list() exit\n");
		/*
		 * Don't return a failure code here.
		 * If ibmf_i_ud_dest_alloc() returns NULL,
		 * the resource allocation will fail.
		 */
		return;
	}

	mutex_enter(&cip->ci_ud_dest_list_mutex);
	count = cip->ci_ud_dest_list_count;

	/* nothing to do if count is above the low water mark */
	if (count > IBMF_UD_DEST_LO_WATER_MARK) {
		mutex_exit(&cip->ci_ud_dest_list_mutex);
		IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L3,
		    ibmf_i_populate_ud_dest, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list(): %s\n", tnf_string, msg,
		    "Count not below low water mark\n");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_populate_ud_dest_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_populate_ud_dest_list() exit\n");
		return;
	}

	/* populate the pool up to the high water mark */
	while (count < IBMF_UD_DEST_HI_WATER_MARK) {
		ibt_adds_vect_t adds_vect;

		ibmf_ud_dest = kmem_zalloc(sizeof (ibmf_ud_dest_t), kmflag);
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibmf_ud_dest))

		/* Call IBTF to allocate an address handle */
		bzero(&adds_vect, sizeof (adds_vect));
		adds_vect.av_port_num = 1;
		adds_vect.av_srate = IBT_SRATE_1X;	/* assume the minimum */
		mutex_exit(&cip->ci_ud_dest_list_mutex);

		status = ibt_alloc_ah(cip->ci_ci_handle, ud_dest_flags,
		    cip->ci_pd, &adds_vect, &ibmf_ud_dest->ud_dest.ud_ah);
		if (status != IBT_SUCCESS) {
			kmem_free(ibmf_ud_dest, sizeof (ibmf_ud_dest_t));
			IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
			    ibmf_i_populate_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_populate_ud_dest_list(): %s, status = %d\n",
			    tnf_string, msg, "ibt alloc ah failed",
			    tnf_uint, ibt_status, status);
			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
			    ibmf_i_populate_ud_dest_end, IBMF_TNF_TRACE, "",
			    "ibmf_i_populate_ud_dest_list() exit\n");
			return;
		}

		/* Add the ud_dest to the list */
		mutex_enter(&cip->ci_ud_dest_list_mutex);

		if (cip->ci_ud_dest_list_head != NULL)
			ibmf_ud_dest->ud_next = cip->ci_ud_dest_list_head;
		else
			ibmf_ud_dest->ud_next = NULL;

		cip->ci_ud_dest_list_head = ibmf_ud_dest;
		cip->ci_ud_dest_list_count++;

		/*
		 * Get the latest count since other threads may have
		 * added to the list as well.
		 */
		count = cip->ci_ud_dest_list_count;
	}

	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_populate_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_populate_ud_dest_list() exit\n");
}
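
/*
 * Illustrative sketch only (not part of the driver): the replenish
 * policy above amounts to a simple hysteresis check on the cached
 * entry count. The helper name below is hypothetical.
 *
 *	static boolean_t
 *	example_needs_replenish(ibmf_ci_t *cip)
 *	{
 *		boolean_t	need;
 *
 *		mutex_enter(&cip->ci_ud_dest_list_mutex);
 *		need = (cip->ci_ud_dest_list_count <=
 *		    IBMF_UD_DEST_LO_WATER_MARK);
 *		mutex_exit(&cip->ci_ud_dest_list_mutex);
 *
 *		return (need);
 *	}
 *
 * When the check succeeds, ibmf_i_populate_ud_dest_list() refills the
 * list until ci_ud_dest_list_count reaches IBMF_UD_DEST_HI_WATER_MARK.
 */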

/*
 * ibmf_i_clean_ud_dest_list():
 * Free up entries from the linked list of IBMF UD destination structures.
 * If the "all" argument is B_TRUE, free up all the entries in the list.
 * If the "all" argument is B_FALSE, free up entries to bring the total
 * down to IBMF_UD_DEST_HI_WATER_MARK.
 */
void
ibmf_i_clean_ud_dest_list(ibmf_ci_t *cip, boolean_t all)
{
	ibmf_ud_dest_t		*ibmf_ud_dest;
	ibt_ud_dest_t		*ud_dest;
	uint32_t		count;
	ibt_status_t		status;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_clean_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_clean_ud_dest_list() enter, "
	    "cip = %p, all = %d\n", tnf_opaque, cip, cip,
	    tnf_uint, all, all);

	mutex_enter(&cip->ci_ud_dest_list_mutex);

	/* Determine the number of UD destination resources to free */
	if (all == B_TRUE) {
		count = cip->ci_ud_dest_list_count;
	} else if (cip->ci_ud_dest_list_count > IBMF_UD_DEST_HI_WATER_MARK) {
		count = cip->ci_ud_dest_list_count -
		    IBMF_UD_DEST_HI_WATER_MARK;
	} else
		count = 0;

	while (count) {
		ibmf_ud_dest = cip->ci_ud_dest_list_head;
		ASSERT(ibmf_ud_dest != NULL);
		if (ibmf_ud_dest != NULL) {
			/* Remove ibmf_ud_dest from the list */
			cip->ci_ud_dest_list_head = ibmf_ud_dest->ud_next;
			cip->ci_ud_dest_list_count--;
			mutex_exit(&cip->ci_ud_dest_list_mutex);

			ud_dest = &ibmf_ud_dest->ud_dest;
			status = ibt_free_ah(cip->ci_ci_handle, ud_dest->ud_ah);
			if (status != IBT_SUCCESS) {
				IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
				    ibmf_i_clean_ud_dest_err, IBMF_TNF_ERROR,
				    "", "ibmf_i_clean_ud_dest_list(): %s, "
				    "status = %d\n", tnf_string, msg,
				    "ibt_free_ah failed", tnf_uint, ibt_status,
				    status);
				IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
				    ibmf_i_clean_ud_dest_end, IBMF_TNF_TRACE,
				    "", "ibmf_i_clean_ud_dest_list() exit\n");
				return;
			}

			/* Free the ud_dest context */
			kmem_free(ibmf_ud_dest, sizeof (ibmf_ud_dest_t));

			mutex_enter(&cip->ci_ud_dest_list_mutex);
		}
		/* Determine the number of UD destination resources to free */
		if (all == B_TRUE) {
			count = cip->ci_ud_dest_list_count;
		} else if (cip->ci_ud_dest_list_count >
		    IBMF_UD_DEST_HI_WATER_MARK) {
			count = cip->ci_ud_dest_list_count -
			    IBMF_UD_DEST_HI_WATER_MARK;
		} else
			count = 0;
	}

	mutex_exit(&cip->ci_ud_dest_list_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_clean_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_clean_ud_dest_list() exit\n");
}
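
/*
 * Illustrative sketch only (not part of the driver): the two intended
 * uses of ibmf_i_clean_ud_dest_list().
 *
 *	ibmf_i_clean_ud_dest_list(cip, B_TRUE);
 *		drain the list completely, as done from
 *		ibmf_i_fini_ud_dest() at CI teardown
 *
 *	ibmf_i_clean_ud_dest_list(cip, B_FALSE);
 *		trim the list back down to IBMF_UD_DEST_HI_WATER_MARK
 *		entries, freeing only the excess
 */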

/*
 * ibmf_i_alloc_ud_dest():
 *	Allocate and set up a UD destination context
 */
/*ARGSUSED*/
int
ibmf_i_alloc_ud_dest(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
    ibt_ud_dest_hdl_t *ud_dest_p, boolean_t block)
{
	ibmf_ci_t		*cip;
	ibmf_addr_info_t	*addrp;
	ibt_status_t		status;
	ibt_adds_vect_t		adds_vec;
	ibt_ud_dest_t		*ud_dest;
	int			ibmf_status, ret;

	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_alloc_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_alloc_ud_dest() enter, "
	    "clientp = %p, msg = %p, ud_destp = %p, block = %d\n",
	    tnf_opaque, clientp, clientp, tnf_opaque, msg, msgimplp,
	    tnf_opaque, ud_dest_p, ud_dest_p, tnf_uint, block, block);

	_NOTE(ASSUMING_PROTECTED(*ud_dest_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ud_dest))

	addrp = &msgimplp->im_local_addr;
	cip = clientp->ic_myci;

	/*
	 * Dispatch a taskq to replenish the UD destination handle cache.
	 */
	mutex_enter(&cip->ci_ud_dest_list_mutex);
	if (cip->ci_ud_dest_list_count < IBMF_UD_DEST_LO_WATER_MARK) {
		ret = ibmf_ud_dest_tq_disp(cip);
		if (ret == 0) {
			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L3,
			    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_alloc_ud_dest(): %s\n", tnf_string, msg,
			    "taskq dispatch of ud_dest population thread "
			    "failed");
		}
	}
	mutex_exit(&cip->ci_ud_dest_list_mutex);

	/* initialize the address vector based on the global/local address */
	if (msgimplp->im_msg_flags & IBMF_MSG_FLAGS_GLOBAL_ADDRESS) {
		/* fill in the grh stuff as expected by ibt */
		adds_vec.av_flow = msgimplp->im_global_addr.ig_flow_label;
		adds_vec.av_send_grh = B_TRUE;
		adds_vec.av_tclass = msgimplp->im_global_addr.ig_tclass;
		adds_vec.av_hop = msgimplp->im_global_addr.ig_hop_limit;
		if (msgimplp->im_unsolicited == B_TRUE) {
			adds_vec.av_sgid =
			    msgimplp->im_global_addr.ig_recver_gid;
			adds_vec.av_dgid =
			    msgimplp->im_global_addr.ig_sender_gid;
		} else {
			adds_vec.av_sgid =
			    msgimplp->im_global_addr.ig_sender_gid;
			adds_vec.av_dgid =
			    msgimplp->im_global_addr.ig_recver_gid;
		}
	} else {
		adds_vec.av_send_grh = B_FALSE;
	}

	/* common address vector initialization */
	adds_vec.av_dlid = addrp->ia_remote_lid;
	if ((clientp->ic_base_lid == 0) && (clientp->ic_qp->iq_qp_num != 0)) {
		/* Get the port's base LID */
		(void) ibt_get_port_state_byguid(
		    clientp->ic_client_info.ci_guid,
		    clientp->ic_client_info.port_num, NULL,
		    &clientp->ic_base_lid);
		if (clientp->ic_base_lid == 0) {
			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
			    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_alloc_ud_dest(): %s\n", tnf_string, msg,
			    "base_lid is not defined, i.e., port is down");
			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
			    ibmf_i_alloc_ud_dest_end, IBMF_TNF_TRACE, "",
			    "ibmf_i_alloc_ud_dest() exit\n");
			return (IBMF_BAD_PORT_STATE);
		}
	}
	adds_vec.av_src_path = addrp->ia_local_lid - clientp->ic_base_lid;
	adds_vec.av_srvl = addrp->ia_service_level;
	adds_vec.av_srate = IBT_SRATE_1X;
	adds_vec.av_port_num = clientp->ic_client_info.port_num;

	ud_dest = *ud_dest_p;

	/* If an IBT UD destination structure has not been allocated, do so */
	if (ud_dest == NULL) {

		ibmf_ud_dest_t *ibmf_ud_dest;

		/* Get a UD destination resource from the list */
		ibmf_ud_dest = ibmf_i_get_ud_dest(cip);
		if (ibmf_ud_dest == NULL) {
			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
			    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_alloc_ud_dest(): %s\n",
			    tnf_string, msg, "No ud_dest available");
			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
			    ibmf_i_alloc_ud_dest_end, IBMF_TNF_TRACE, "",
			    "ibmf_i_alloc_ud_dest() exit\n");
			return (IBMF_NO_RESOURCES);
		}
		ud_dest = &ibmf_ud_dest->ud_dest;
		msgimplp->im_ibmf_ud_dest = ibmf_ud_dest;
		ud_dest->ud_qkey = msgimplp->im_local_addr.ia_q_key;
		ud_dest->ud_dst_qpn = msgimplp->im_local_addr.ia_remote_qno;
		*ud_dest_p = ud_dest;
	} else {
		ud_dest->ud_qkey = msgimplp->im_local_addr.ia_q_key;
		ud_dest->ud_dst_qpn = msgimplp->im_local_addr.ia_remote_qno;
	}

	/* modify the address handle with the address vector information */
	status = ibt_modify_ah(cip->ci_ci_handle, ud_dest->ud_ah, &adds_vec);
	if (status != IBT_SUCCESS)
		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_alloc_ud_dest_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_alloc_ud_dest(): %s, status = %d\n",
		    tnf_string, msg, "ibt_modify_ah failed", tnf_uint,
		    ibt_status, status);

	ibmf_status = ibmf_i_ibt_to_ibmf_status(status);
	if (ibmf_status == IBMF_SUCCESS) {
		mutex_enter(&clientp->ic_kstat_mutex);
		IBMF_ADD32_KSTATS(clientp, ud_dests_alloced, 1);
		mutex_exit(&clientp->ic_kstat_mutex);
	}

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_alloc_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_alloc_ud_dest() exit\n");

	return (ibmf_status);
}

/*
 * ibmf_i_free_ud_dest():
 *	Free up the UD destination context
 */
void
ibmf_i_free_ud_dest(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp)
{
	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_free_ud_dest_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_free_ud_dest() enter\n");

	ibmf_i_put_ud_dest(clientp->ic_myci, msgimplp->im_ibmf_ud_dest);

	/* Clear the UD dest pointers so a new UD dest may be allocated */
	mutex_enter(&msgimplp->im_mutex);
	msgimplp->im_ibmf_ud_dest = NULL;
	msgimplp->im_ud_dest = NULL;
	mutex_exit(&msgimplp->im_mutex);

	mutex_enter(&clientp->ic_kstat_mutex);
	IBMF_SUB32_KSTATS(clientp, ud_dests_alloced, 1);
	mutex_exit(&clientp->ic_kstat_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_free_ud_dest_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_free_ud_dest() exit\n");
}
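
/*
 * Illustrative sketch only (not part of the driver): a send path is
 * expected to pair ibmf_i_alloc_ud_dest() with ibmf_i_free_ud_dest()
 * around use of the message's UD destination. The function name below
 * is hypothetical, and passing &msgimplp->im_ud_dest as the handle
 * pointer is an assumption about the caller.
 *
 *	static int
 *	example_setup_and_teardown(ibmf_client_t *clientp,
 *	    ibmf_msg_impl_t *msgimplp, boolean_t block)
 *	{
 *		int	status;
 *
 *		status = ibmf_i_alloc_ud_dest(clientp, msgimplp,
 *		    &msgimplp->im_ud_dest, block);
 *		if (status != IBMF_SUCCESS)
 *			return (status);
 *
 *		(post the send using msgimplp->im_ud_dest)
 *
 *		ibmf_i_free_ud_dest(clientp, msgimplp);
 *		return (IBMF_SUCCESS);
 *	}
 */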

/*
 * ibmf_i_pop_ud_dest_thread()
 *
 * Wrapper function to call ibmf_i_populate_ud_dest_list() with
 * the KM_SLEEP flag.
 */
void
ibmf_i_pop_ud_dest_thread(void *argp)
{
	ibmf_ci_t *cip = (ibmf_ci_t *)argp;

	ibmf_i_populate_ud_dest_list(cip, KM_SLEEP);
}

/*
 * ibmf_ud_dest_tq_disp()
 *
 * Wrapper for taskq dispatch of the function that populates
 * the UD destination handle cache.
 */
int
ibmf_ud_dest_tq_disp(ibmf_ci_t *cip)
{
	return (taskq_dispatch(ibmf_statep->ibmf_taskq,
	    ibmf_i_pop_ud_dest_thread, cip, TQ_NOSLEEP));
}
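
/*
 * Illustrative sketch only (not part of the driver): how the dispatch
 * wrapper is used on the allocation path. A zero return from
 * taskq_dispatch() means the replenish request could not be queued,
 * and the cache is simply left as-is for this pass.
 *
 *	mutex_enter(&cip->ci_ud_dest_list_mutex);
 *	if (cip->ci_ud_dest_list_count < IBMF_UD_DEST_LO_WATER_MARK) {
 *		if (ibmf_ud_dest_tq_disp(cip) == 0) {
 *			(dispatch failed; a later allocation will
 *			retry the replenish)
 *		}
 *	}
 *	mutex_exit(&cip->ci_ud_dest_list_mutex);
 */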