/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file implements the Work Queue Entry (WQE) management in IBMF.
 */

#include <sys/ib/mgt/ibmf/ibmf_impl.h>

extern int ibmf_trace_level;
extern int ibmf_send_wqes_per_port, ibmf_recv_wqes_per_port;

#define	IBMF_INIT_SG_ELEMENT(sg, mem, lkey, size)	{ \
	(sg).ds_va = (ib_vaddr_t)(uintptr_t)(mem); \
	(sg).ds_key = (lkey); \
	(sg).ds_len = (size); \
}

#define	IBMF_ADDR_TO_SEND_WR_ID(ptr, id) \
	(id) = (ibt_wrid_t)(uintptr_t)(ptr)

#define	IBMF_ADDR_TO_RECV_WR_ID(ptr, id) \
	(id) = ((ibt_wrid_t)(uintptr_t)(ptr) | IBMF_RCV_CQE)

#define	IBMF_INIT_RMPP_HDR(hdrp, ver, type, respt, flg, status, seg, lennwl) { \
	(hdrp)->rmpp_version = (ver); \
	(hdrp)->rmpp_type = (type); \
	(hdrp)->rmpp_resp_time = (respt); \
	(hdrp)->rmpp_flags = (flg); \
	(hdrp)->rmpp_status = (status); \
	(hdrp)->rmpp_segnum = (h2b32(seg)); \
	(hdrp)->rmpp_pyldlen_nwl = (h2b32(lennwl)); \
}

static int ibmf_send_wqe_cache_constructor(void *buf, void *cdrarg,
    int kmflags);
static void ibmf_send_wqe_cache_destructor(void *buf, void *cdrarg);
static int ibmf_recv_wqe_cache_constructor(void *buf, void *cdrarg,
    int kmflags);
static void ibmf_recv_wqe_cache_destructor(void *buf, void *cdrarg);
static int ibmf_i_extend_wqe_mem(ibmf_ci_t *cip,
    ibmf_qp_handle_t ibmf_qp_handle, ibmf_wqe_mgt_t *wqe_mgt,
    boolean_t block);

/*
 * ibmf_send_wqe_cache_constructor():
 *	Constructor for the kmem cache used for send WQEs for special QPs
 */
/* ARGSUSED */
static int
ibmf_send_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
	ibmf_ci_t	*cip = (ibmf_ci_t *)cdrarg;
	ibmf_wqe_mgt_t	*wqe_mgt;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_send_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_send_wqe_cache_constructor() enter, buf = %p, cdarg = %p\n",
	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))

	/* initialize send WQE context */
	send_wqe->send_sg_mem =
	    (ib_vaddr_t)(uintptr_t)vmem_alloc(cip->ci_wqe_ib_vmem,
	    IBMF_MEM_PER_WQE, kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
	if (send_wqe->send_sg_mem == NULL) {
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_send_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
		    "ibmf_send_wqe_cache_constructor(): %s\n", tnf_string, msg,
		    "Failed vmem allocation in send WQE cache constructor");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
		    "ibmf_send_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&cip->ci_wqe_mutex);
	wqe_mgt = cip->ci_wqe_mgt_list;

	/* Look for the WQE management struct that includes this address */
	while (wqe_mgt != NULL) {
		mutex_enter(&wqe_mgt->wqes_mutex);
		if ((send_wqe->send_sg_mem >= wqe_mgt->wqes_ib_mem) &&
		    (send_wqe->send_sg_mem < (wqe_mgt->wqes_ib_mem +
		    wqe_mgt->wqes_kmem_sz))) {
			mutex_exit(&wqe_mgt->wqes_mutex);
			break;
		}
		mutex_exit(&wqe_mgt->wqes_mutex);
		wqe_mgt = wqe_mgt->wqe_mgt_next;
	}

	if (wqe_mgt == NULL) {
		mutex_exit(&cip->ci_wqe_mutex);
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_send_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
		    "ibmf_send_wqe_cache_constructor(): %s\n", tnf_string, msg,
		    "Address not found in WQE mgt list");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
		    "ibmf_send_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&wqe_mgt->wqes_mutex);

	send_wqe->send_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
	    (uintptr_t)(send_wqe->send_sg_mem - wqe_mgt->wqes_ib_mem));
	bzero(send_wqe->send_mem, IBMF_MEM_PER_WQE);
	send_wqe->send_sg_lkey = wqe_mgt->wqes_ib_lkey;
	send_wqe->send_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
	send_wqe->send_wqe_flags = 0;
	send_wqe->send_wqe_next = NULL;

	mutex_exit(&wqe_mgt->wqes_mutex);
	mutex_exit(&cip->ci_wqe_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_send_wqe_cache_constructor() exit\n");

	return (0);
}

/*
 * ibmf_send_wqe_cache_destructor():
 *	Destructor for send WQE kmem cache for special QPs
 */
/* ARGSUSED */
static void
ibmf_send_wqe_cache_destructor(void *buf, void *cdrarg)
{
	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
	ibmf_ci_t	*cip = (ibmf_ci_t *)cdrarg;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_send_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_send_wqe_cache_destructor() enter, buf = %p, cdarg = %p\n",
	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))

	/* Free the vmem allocated for the WQE */
	vmem_free(cip->ci_wqe_ib_vmem,
	    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
	send_wqe->send_mem = NULL;

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_send_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_send_wqe_cache_destructor() exit\n");
}

/*
 * ibmf_recv_wqe_cache_constructor():
 *	Constructor for receive WQE kmem cache for special QPs
 */
/* ARGSUSED */
static int
ibmf_recv_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
	ibmf_ci_t	*cip = (ibmf_ci_t *)cdrarg;
	ibmf_wqe_mgt_t	*wqe_mgt;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_recv_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_recv_wqe_cache_constructor() enter, buf = %p, cdarg = %p\n",
	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))

	/* initialize recv WQE context */
	recv_wqe->recv_sg_mem =
	    (ib_vaddr_t)(uintptr_t)vmem_alloc(cip->ci_wqe_ib_vmem,
	    IBMF_MEM_PER_WQE, kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
	if (recv_wqe->recv_sg_mem == NULL) {
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
		    "ibmf_recv_wqe_cache_constructor(): %s\n", tnf_string, msg,
		    "Failed vmem allocation in receive WQE cache constructor");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
		    "ibmf_recv_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&cip->ci_wqe_mutex);
	wqe_mgt = cip->ci_wqe_mgt_list;

	/* Look for the WQE management struct that includes this address */
	while (wqe_mgt != NULL) {
		mutex_enter(&wqe_mgt->wqes_mutex);
		if ((recv_wqe->recv_sg_mem >= wqe_mgt->wqes_ib_mem) &&
		    (recv_wqe->recv_sg_mem < (wqe_mgt->wqes_ib_mem +
		    wqe_mgt->wqes_kmem_sz))) {
			mutex_exit(&wqe_mgt->wqes_mutex);
			break;
		}
		mutex_exit(&wqe_mgt->wqes_mutex);
		wqe_mgt = wqe_mgt->wqe_mgt_next;
	}

	if (wqe_mgt == NULL) {
		mutex_exit(&cip->ci_wqe_mutex);
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
		    "ibmf_recv_wqe_cache_constructor(): %s\n", tnf_string, msg,
		    "Address not found in WQE mgt list");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
		    "ibmf_recv_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&wqe_mgt->wqes_mutex);

	recv_wqe->recv_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
	    (uintptr_t)(recv_wqe->recv_sg_mem - wqe_mgt->wqes_ib_mem));
	bzero(recv_wqe->recv_mem, IBMF_MEM_PER_WQE);
	recv_wqe->recv_sg_lkey = wqe_mgt->wqes_ib_lkey;
	recv_wqe->recv_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
	recv_wqe->recv_wqe_next = NULL;
	recv_wqe->recv_msg = NULL;
	recv_wqe->recv_wqe_flags = 0;

	mutex_exit(&wqe_mgt->wqes_mutex);
	mutex_exit(&cip->ci_wqe_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_recv_wqe_cache_constructor() exit\n");

	return (0);
}

/*
 * ibmf_recv_wqe_cache_destructor():
 *	Destructor for receive WQE kmem cache for special QPs
 */
/* ARGSUSED */
static void
ibmf_recv_wqe_cache_destructor(void *buf, void *cdrarg)
{
	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
	ibmf_ci_t	*cip = (ibmf_ci_t *)cdrarg;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_recv_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_recv_wqe_cache_destructor() enter, buf = %p, cdarg = %p\n",
	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))

	/* Free the vmem allocated for the WQE */
	vmem_free(cip->ci_wqe_ib_vmem,
	    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
	recv_wqe->recv_mem = NULL;

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_recv_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_recv_wqe_cache_destructor() exit\n");
}

/*
 * ibmf_altqp_send_wqe_cache_constructor():
 *	Constructor for the kmem cache used for send WQEs for alternate QPs
 */
/* ARGSUSED */
int
ibmf_altqp_send_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
	ibmf_wqe_mgt_t	*wqe_mgt;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_altqp_send_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_send_wqe_cache_constructor() enter, buf = %p, "
	    "cdarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))

	/* initialize send WQE context */
	send_wqe->send_sg_mem = (ib_vaddr_t)(uintptr_t)vmem_alloc(
	    qp_ctx->isq_wqe_ib_vmem, IBMF_MEM_PER_WQE,
	    kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
	if (send_wqe->send_sg_mem == NULL) {
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_altqp_send_wqe_cache_constructor_err, IBMF_TNF_ERROR,
		    "", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
		    tnf_string, msg, "Failed vmem allocation in "
		    "alternate QP send WQE cache constructor");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_altqp_send_wqe_cache_constructor_end, IBMF_TNF_TRACE,
		    "", "ibmf_altqp_send_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&qp_ctx->isq_wqe_mutex);
	wqe_mgt = qp_ctx->isq_wqe_mgt_list;

	/* Look for the WQE management struct that includes this address */
	while (wqe_mgt != NULL) {
		mutex_enter(&wqe_mgt->wqes_mutex);
		if ((send_wqe->send_sg_mem >= wqe_mgt->wqes_ib_mem) &&
		    (send_wqe->send_sg_mem < (wqe_mgt->wqes_ib_mem +
		    wqe_mgt->wqes_kmem_sz))) {
			mutex_exit(&wqe_mgt->wqes_mutex);
			break;
		}
		mutex_exit(&wqe_mgt->wqes_mutex);
		wqe_mgt = wqe_mgt->wqe_mgt_next;
	}

	if (wqe_mgt == NULL) {
		mutex_exit(&qp_ctx->isq_wqe_mutex);
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_altqp_send_wqe_cache_constructor_err, IBMF_TNF_ERROR,
		    "", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
		    tnf_string, msg, "Address not found in WQE mgt list");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_altqp_send_wqe_cache_constructor_end,
		    IBMF_TNF_TRACE, "",
		    "ibmf_altqp_send_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&wqe_mgt->wqes_mutex);

	send_wqe->send_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
	    (uintptr_t)(send_wqe->send_sg_mem - wqe_mgt->wqes_ib_mem));
	bzero(send_wqe->send_mem, IBMF_MEM_PER_WQE);
	send_wqe->send_sg_lkey = wqe_mgt->wqes_ib_lkey;
	send_wqe->send_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
	send_wqe->send_wqe_flags = 0;

	mutex_exit(&wqe_mgt->wqes_mutex);
	mutex_exit(&qp_ctx->isq_wqe_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_altqp_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_send_wqe_cache_constructor() exit\n");

	return (0);
}

/*
 * ibmf_altqp_send_wqe_cache_destructor():
 *	Destructor for send WQE kmem cache for alternate QPs
 */
/* ARGSUSED */
void
ibmf_altqp_send_wqe_cache_destructor(void *buf, void *cdrarg)
{
	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_altqp_send_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_send_wqe_cache_destructor() enter, buf = %p, "
	    "cdarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))

	/* Free the vmem allocated for the WQE */
	vmem_free(qp_ctx->isq_wqe_ib_vmem,
	    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
	send_wqe->send_mem = NULL;

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_altqp_send_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_send_wqe_cache_destructor() exit\n");
}

/*
 * ibmf_altqp_recv_wqe_cache_constructor():
 *	Constructor for receive WQE kmem cache for alternate QPs
 */
/* ARGSUSED */
int
ibmf_altqp_recv_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
	ibmf_wqe_mgt_t	*wqe_mgt;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_altqp_recv_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_recv_wqe_cache_constructor() enter, buf = %p, "
	    "cdarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))

	/* initialize recv WQE context */
	recv_wqe->recv_sg_mem = (ib_vaddr_t)(uintptr_t)vmem_alloc(
	    qp_ctx->isq_wqe_ib_vmem, IBMF_MEM_PER_WQE,
	    kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
	if (recv_wqe->recv_sg_mem == NULL) {
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_altqp_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR,
		    "", "ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
		    tnf_string, msg,
		    "Failed vmem allocation in recv WQE cache constructor");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE,
		    "", "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&qp_ctx->isq_wqe_mutex);
	wqe_mgt = qp_ctx->isq_wqe_mgt_list;

	/* Look for the WQE management struct that includes this address */
	while (wqe_mgt != NULL) {
		mutex_enter(&wqe_mgt->wqes_mutex);
		if ((recv_wqe->recv_sg_mem >= wqe_mgt->wqes_ib_mem) &&
		    (recv_wqe->recv_sg_mem < (wqe_mgt->wqes_ib_mem +
		    wqe_mgt->wqes_kmem_sz))) {
			mutex_exit(&wqe_mgt->wqes_mutex);
			break;
		}
		mutex_exit(&wqe_mgt->wqes_mutex);
		wqe_mgt = wqe_mgt->wqe_mgt_next;
	}

	if (wqe_mgt == NULL) {
		mutex_exit(&qp_ctx->isq_wqe_mutex);
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_altqp_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR,
		    "", "ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
		    tnf_string, msg, "Address not found in WQE mgt list");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE,
		    "", "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
		return (-1);
	}

	mutex_enter(&wqe_mgt->wqes_mutex);

	recv_wqe->recv_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
	    (uintptr_t)(recv_wqe->recv_sg_mem - wqe_mgt->wqes_ib_mem));
	bzero(recv_wqe->recv_mem, IBMF_MEM_PER_WQE);
	recv_wqe->recv_sg_lkey = wqe_mgt->wqes_ib_lkey;
	recv_wqe->recv_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
	recv_wqe->recv_wqe_flags = 0;

	mutex_exit(&wqe_mgt->wqes_mutex);
	mutex_exit(&qp_ctx->isq_wqe_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_recv_wqe_cache_constructor() exit\n");

	return (0);
}

/*
 * ibmf_altqp_recv_wqe_cache_destructor():
 *	Destructor for receive WQE kmem cache for alternate QPs
 */
/* ARGSUSED */
void
ibmf_altqp_recv_wqe_cache_destructor(void *buf, void *cdrarg)
{
	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;

	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_altqp_recv_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_recv_wqe_cache_destructor() enter, buf = %p, "
	    "cdarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))

	/* Free the vmem allocated for the WQE */
	vmem_free(qp_ctx->isq_wqe_ib_vmem,
	    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
	recv_wqe->recv_mem = NULL;

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_altqp_recv_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
	    "ibmf_altqp_recv_wqe_cache_destructor() exit\n");
}

/*
 * ibmf_i_init_wqes():
 *	Create the kmem cache for send and receive WQEs
 */
int
ibmf_i_init_wqes(ibmf_ci_t *cip)
{
	ibt_status_t		status;
	ibt_mr_hdl_t		mem_hdl;
	ibt_mr_desc_t		mem_desc;
	ibt_mr_attr_t		mem_attr;
	ibmf_wqe_mgt_t		*wqe_mgtp;
	char			string[128];

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_wqes_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_wqes() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	/*
	 * Allocate memory for the WQE management structure
	 */
	wqe_mgtp = kmem_zalloc(sizeof (ibmf_wqe_mgt_t), KM_SLEEP);
	mutex_init(&wqe_mgtp->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgtp))

	/*
	 * Allocate memory for the WQEs to be used by the special QPs on
	 * this CI. There are two special QPs per CI port.
	 */
	wqe_mgtp->wqes_kmem_sz = cip->ci_nports * 2 *
	    ((IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port));
	wqe_mgtp->wqes_kmem =
	    kmem_zalloc(wqe_mgtp->wqes_kmem_sz, KM_SLEEP);

	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgtp->wqes_kmem;
	mem_attr.mr_len = wqe_mgtp->wqes_kmem_sz;
	mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
	mem_attr.mr_as = NULL;

	/* Register the allocated memory */
	status = ibt_register_mr(cip->ci_ci_handle, cip->ci_pd, &mem_attr,
	    &mem_hdl, &mem_desc);
	if (status != IBT_SUCCESS) {
		kmem_free(wqe_mgtp->wqes_kmem,
		    wqe_mgtp->wqes_kmem_sz);
		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_init_wqes_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_init_wqes(): %s, status = %d\n", tnf_string, msg,
		    "register of WQE mem failed", tnf_uint, status, status);
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_init_wqes_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_init_wqes() exit\n");
		return (IBMF_NO_RESOURCES);
	}

	/* Store the memory registration information */
	wqe_mgtp->wqes_ib_mem = mem_desc.md_vaddr;
	wqe_mgtp->wqes_ib_lkey = mem_desc.md_lkey;
	wqe_mgtp->wqes_ib_mem_hdl = mem_hdl;

	/* Create a vmem arena for the IB virtual address space */
	bzero(string, 128);
	(void) sprintf(string, "ibmf_%016" PRIx64 "_wqes", cip->ci_node_guid);
	cip->ci_wqe_ib_vmem = vmem_create(string,
	    (void *)(uintptr_t)wqe_mgtp->wqes_ib_mem, wqe_mgtp->wqes_kmem_sz,
	    sizeof (uint64_t), NULL, NULL, NULL, 0, VM_SLEEP);

	mutex_enter(&cip->ci_wqe_mutex);
	cip->ci_wqe_mgt_list = wqe_mgtp;
	mutex_exit(&cip->ci_wqe_mutex);

	bzero(string, 128);
	(void) sprintf(string, "ibmf_%016" PRIx64 "_swqe", cip->ci_node_guid);
	/* create a kmem cache for the send WQEs */
	cip->ci_send_wqes_cache = kmem_cache_create(string,
	    sizeof (ibmf_send_wqe_t), 0, ibmf_send_wqe_cache_constructor,
	    ibmf_send_wqe_cache_destructor, NULL, (void *)cip, NULL, 0);

	bzero(string, 128);
	(void) sprintf(string, "ibmf_%016" PRIx64 "_rwqe", cip->ci_node_guid);
	/* create a kmem cache for the receive WQEs */
	cip->ci_recv_wqes_cache = kmem_cache_create(string,
	    sizeof (ibmf_recv_wqe_t), 0, ibmf_recv_wqe_cache_constructor,
	    ibmf_recv_wqe_cache_destructor, NULL, (void *)cip, NULL, 0);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_wqes_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_wqes() exit\n");

	return (IBMF_SUCCESS);
}

/*
 * ibmf_i_fini_wqes():
 *	Destroy the kmem cache for send and receive WQEs
 */
void
ibmf_i_fini_wqes(ibmf_ci_t *cip)
{
	ibmf_wqe_mgt_t	*wqe_mgt;
	ibt_mr_hdl_t	wqe_ib_mem_hdl;
	void		*wqe_kmem;
	uint64_t	wqe_kmem_sz;

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_wqes() enter, cip = %p\n",
	    tnf_opaque, cip, cip);

	mutex_enter(&cip->ci_wqe_mutex);

	wqe_mgt = cip->ci_wqe_mgt_list;
	while (wqe_mgt != NULL) {
		/* Remove the WQE mgt struct from the list */
		cip->ci_wqe_mgt_list = wqe_mgt->wqe_mgt_next;
		mutex_exit(&cip->ci_wqe_mutex);

		mutex_enter(&wqe_mgt->wqes_mutex);
		wqe_ib_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
		wqe_kmem = wqe_mgt->wqes_kmem;
		wqe_kmem_sz = wqe_mgt->wqes_kmem_sz;
		mutex_exit(&wqe_mgt->wqes_mutex);

		/* Deregister the memory allocated for the WQEs */
		(void) ibt_deregister_mr(cip->ci_ci_handle, wqe_ib_mem_hdl);

		/* Free the kmem allocated for the WQEs */
		kmem_free(wqe_kmem, wqe_kmem_sz);

		/* Destroy the mutex */
		mutex_destroy(&wqe_mgt->wqes_mutex);

		/* Free the WQE management structure */
		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));

		mutex_enter(&cip->ci_wqe_mutex);
		wqe_mgt = cip->ci_wqe_mgt_list;
	}

	mutex_exit(&cip->ci_wqe_mutex);

	/* Destroy the kmem_cache for the send WQE */
	kmem_cache_destroy(cip->ci_send_wqes_cache);
	/* Destroy the kmem_cache for the receive WQE */
	kmem_cache_destroy(cip->ci_recv_wqes_cache);

	/*
	 * Destroy the vmem arena for the WQEs
	 * This must be done after the kmem_cache_destroy() calls since
	 * the cache destructors call vmem_free()
	 */
	vmem_destroy((void *)cip->ci_wqe_ib_vmem);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_wqes() exit\n");
}

/*
 * ibmf_i_init_altqp_wqes():
 *	Create the kmem cache for send and receive WQEs used by alternate QPs
 */
int
ibmf_i_init_altqp_wqes(ibmf_alt_qp_t *qp_ctx)
{
	ibt_status_t		status;
	ibt_mr_hdl_t		mem_hdl;
	ibt_mr_desc_t		mem_desc;
	ibt_mr_attr_t		mem_attr;
	ibmf_wqe_mgt_t		*wqe_mgtp;
	char			string[128];

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_altqp_wqes_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_altqp_wqes() enter, qp_ctx = %p\n",
	    tnf_opaque, qp, qp_ctx);

	/*
	 * Allocate memory for the WQE management structure
	 */
	wqe_mgtp = kmem_zalloc(sizeof (ibmf_wqe_mgt_t), KM_SLEEP);
	mutex_init(&wqe_mgtp->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgtp))

	/*
	 * Allocate memory for all the WQEs to be used by this alternate QP
	 */
	wqe_mgtp->wqes_kmem_sz = (IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port);
	wqe_mgtp->wqes_kmem = kmem_zalloc(wqe_mgtp->wqes_kmem_sz, KM_SLEEP);

	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgtp->wqes_kmem;
	mem_attr.mr_len = wqe_mgtp->wqes_kmem_sz;
	mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
	mem_attr.mr_as = NULL;

	/* Register the allocated memory */
	status = ibt_register_mr(qp_ctx->isq_client_hdl->ic_myci->ci_ci_handle,
	    qp_ctx->isq_client_hdl->ic_myci->ci_pd, &mem_attr, &mem_hdl,
	    &mem_desc);
	if (status != IBT_SUCCESS) {
		kmem_free(wqe_mgtp->wqes_kmem, wqe_mgtp->wqes_kmem_sz);
		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_init_altqp_wqes_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_init_altqp_wqes(): %s, status = %d\n",
		    tnf_string, msg,
		    "register of WQE mem failed", tnf_uint, status, status);
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_init_altqp_wqes_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_init_altqp_wqes() exit\n");
		return (IBMF_NO_RESOURCES);
	}

	/* Store the memory registration information */
	wqe_mgtp->wqes_ib_mem = mem_desc.md_vaddr;
	wqe_mgtp->wqes_ib_lkey = mem_desc.md_lkey;
	wqe_mgtp->wqes_ib_mem_hdl = mem_hdl;

	/* Create a vmem arena for the IB virtual address space */
	bzero(string, 128);
	(void) sprintf(string, "ibmf_%016" PRIx64 "_%x_wqes",
	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
	qp_ctx->isq_wqe_ib_vmem = vmem_create(string,
	    (void *)(uintptr_t)wqe_mgtp->wqes_ib_mem, wqe_mgtp->wqes_kmem_sz,
	    sizeof (uint64_t), NULL, NULL, NULL, 0, VM_SLEEP);

	bzero(string, 128);
	/*
	 * CAUTION: Do not exceed 32 characters for the kmem cache name, else,
	 * mdb does not exit (bug 4878751). There is some connection between
	 * mdb walkers and kmem_caches with the limitation likely to be in the
	 * mdb code.
	 */
	(void) sprintf(string, "ibmf%016" PRIx64 "_%xs",
	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
	/* create a kmem cache for the send WQEs */
	qp_ctx->isq_send_wqes_cache = kmem_cache_create(string,
	    sizeof (ibmf_send_wqe_t), 0, ibmf_altqp_send_wqe_cache_constructor,
	    ibmf_altqp_send_wqe_cache_destructor, NULL, (void *)qp_ctx,
	    NULL, 0);

	bzero(string, 128);
	(void) sprintf(string, "ibmf%016" PRIx64 "_%xr",
	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
	/* create a kmem cache for the receive WQEs */
	qp_ctx->isq_recv_wqes_cache = kmem_cache_create(string,
	    sizeof (ibmf_recv_wqe_t), 0, ibmf_altqp_recv_wqe_cache_constructor,
	    ibmf_altqp_recv_wqe_cache_destructor, NULL, (void *)qp_ctx,
	    NULL, 0);

	mutex_enter(&qp_ctx->isq_wqe_mutex);
	qp_ctx->isq_wqe_mgt_list = wqe_mgtp;
	mutex_exit(&qp_ctx->isq_wqe_mutex);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_altqp_wqes_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_altqp_wqes() exit\n");

	return (IBMF_SUCCESS);
}

/*
 * ibmf_i_fini_altqp_wqes():
 *	Destroy the kmem cache for send and receive WQEs for alternate QPs
 */
void
ibmf_i_fini_altqp_wqes(ibmf_alt_qp_t *qp_ctx)
{
	ibmf_wqe_mgt_t	*wqe_mgt;
	ibt_mr_hdl_t	wqe_ib_mem_hdl;
	void		*wqe_kmem;
	uint64_t	wqe_kmem_sz;

	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_altqp_wqes_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_altqp_wqes() enter, qp_ctx = %p\n",
	    tnf_opaque, qp, qp_ctx);

	mutex_enter(&qp_ctx->isq_wqe_mutex);
	wqe_mgt = qp_ctx->isq_wqe_mgt_list;
	while (wqe_mgt != NULL) {
		/* Remove the WQE mgt struct from the list */
		qp_ctx->isq_wqe_mgt_list = wqe_mgt->wqe_mgt_next;
		mutex_exit(&qp_ctx->isq_wqe_mutex);

		mutex_enter(&wqe_mgt->wqes_mutex);
		wqe_ib_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
		wqe_kmem = wqe_mgt->wqes_kmem;
		wqe_kmem_sz = wqe_mgt->wqes_kmem_sz;
		mutex_exit(&wqe_mgt->wqes_mutex);

		/* Deregister the memory allocated for the WQEs */
		(void) ibt_deregister_mr(
		    qp_ctx->isq_client_hdl->ic_myci->ci_ci_handle,
		    wqe_ib_mem_hdl);

		/* Free the kmem allocated for the WQEs */
		kmem_free(wqe_kmem, wqe_kmem_sz);

		/* Destroy the WQE mgt struct mutex */
		mutex_destroy(&wqe_mgt->wqes_mutex);

		/* Free the WQE management structure */
		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));

		mutex_enter(&qp_ctx->isq_wqe_mutex);
		wqe_mgt = qp_ctx->isq_wqe_mgt_list;
	}

	mutex_exit(&qp_ctx->isq_wqe_mutex);

	/* Destroy the kmem_cache for the send WQE */
	kmem_cache_destroy(qp_ctx->isq_send_wqes_cache);
	/* Destroy the kmem_cache for the receive WQE */
	kmem_cache_destroy(qp_ctx->isq_recv_wqes_cache);

	/*
	 * Destroy the vmem arena for the WQEs
	 * This must be done after the kmem_cache_destroy() calls since
	 * the cache destructors call vmem_free()
	 */
	vmem_destroy((void *)qp_ctx->isq_wqe_ib_vmem);

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_altqp_wqes_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_fini_altqp_wqes() exit\n");
}

/*
 * ibmf_i_init_send_wqe():
 *	Initialize a send WQE
 */
/* ARGSUSED */
void
ibmf_i_init_send_wqe(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
    ibt_wr_ds_t *sglp, ibmf_send_wqe_t *wqep, ibt_ud_dest_hdl_t ud_dest,
    ibt_qp_hdl_t ibt_qp_handle, ibmf_qp_handle_t ibmf_qp_handle)
{
	ibmf_msg_bufs_t *ipbufs = &msgimplp->im_msgbufs_send;
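	/*
	 * Note: ipbufs always supplies the data payload for the outgoing
	 * send; hdr_ipbufs (chosen further below based on the transaction
	 * type) supplies the MAD header and any class header.
	 */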
	ibmf_msg_bufs_t *hdr_ipbufs;
	ib_mad_hdr_t	*ibmadhdrp;
	ibmf_rmpp_ctx_t *rmpp_ctx = &msgimplp->im_rmpp_ctx;
	ibmf_rmpp_hdr_t *rmpp_hdr;
	ibt_send_wr_t	*swrp;
	uchar_t		*buf;
	size_t		data_sz, offset;
	uint32_t	cl_hdr_sz, cl_hdr_off;

	IBMF_TRACE_5(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_send_wqe_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_send_wqe() enter, "
	    "clientp = %p, msg = %p, sglp = %p , wqep = %p, qp_hdl = %p\n",
	    tnf_opaque, clientp, clientp, tnf_opaque, msg, msgimplp,
	    tnf_opaque, sglp, sglp, tnf_opaque, wqep, wqep,
	    tnf_opaque, qp_hdl, ibmf_qp_handle);

	_NOTE(ASSUMING_PROTECTED(*wqep))
	_NOTE(ASSUMING_PROTECTED(*sglp))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*swrp))

	swrp = &wqep->send_wr;
	/* use send wqe pointer as the WR ID */
	IBMF_ADDR_TO_SEND_WR_ID(wqep, swrp->wr_id);
	ASSERT(swrp->wr_id != NULL);
	swrp->wr_flags = IBT_WR_NO_FLAGS;
	swrp->wr_opcode = IBT_WRC_SEND;
	swrp->wr_trans = IBT_UD_SRV;
	wqep->send_client = clientp;
	wqep->send_msg = msgimplp;

	IBMF_INIT_SG_ELEMENT(sglp[0], wqep->send_mem, wqep->send_sg_lkey,
	    IBMF_MAD_SIZE);

	bzero(wqep->send_mem, IBMF_MAD_SIZE);
	if (msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) {
		buf = (uchar_t *)ipbufs->im_bufs_cl_data +
		    (rmpp_ctx->rmpp_ns - 1) * rmpp_ctx->rmpp_pkt_data_sz;
		data_sz = (rmpp_ctx->rmpp_ns == rmpp_ctx->rmpp_num_pkts) ?
		    rmpp_ctx->rmpp_last_pkt_sz : rmpp_ctx->rmpp_pkt_data_sz;
	} else {
		buf = ipbufs->im_bufs_cl_data;
		data_sz = ipbufs->im_bufs_cl_data_len;
	}

	/*
	 * We pick the correct msgbuf based on the nature of the transaction.
	 * Where the send msgbuf is available, we pick it to provide the
	 * context of the outgoing MAD. Note that if this is a termination
	 * context, then the send buffer is invalid even if the sequenced
	 * flags is set because the termination message only has a receive
	 * buffer set up.
	 */
	if ((msgimplp->im_flags & IBMF_MSG_FLAGS_SEQUENCED) &&
	    ((msgimplp->im_flags & IBMF_MSG_FLAGS_TERMINATION) == 0)) {
		hdr_ipbufs = &msgimplp->im_msgbufs_send;
	} else if (msgimplp->im_flags & IBMF_MSG_FLAGS_RECV_RMPP) {
		hdr_ipbufs = &msgimplp->im_msgbufs_recv;
	} else if (msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) {
		hdr_ipbufs = &msgimplp->im_msgbufs_send;
	} else {
		if (msgimplp->im_unsolicited == B_TRUE) {
			hdr_ipbufs = &msgimplp->im_msgbufs_recv;
		} else {
			hdr_ipbufs = &msgimplp->im_msgbufs_send;
		}
	}

	bcopy((void *)hdr_ipbufs->im_bufs_mad_hdr,
	    (void *)wqep->send_mem, sizeof (ib_mad_hdr_t));

	/*
	 * For unsolicited messages, we only have the sender's MAD at hand.
	 * So, we must flip the response bit in the method for the outgoing
	 * MAD.
	 */
	ibmadhdrp = (ib_mad_hdr_t *)wqep->send_mem;
	if (msgimplp->im_unsolicited == B_TRUE) {
		ibmadhdrp->R_Method = IBMF_FLIP_RESP_BIT(ibmadhdrp->R_Method);
	}

	offset = sizeof (ib_mad_hdr_t);

	if ((msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) ||
	    (msgimplp->im_flags & IBMF_MSG_FLAGS_RECV_RMPP)) {

		rmpp_hdr = (ibmf_rmpp_hdr_t *)
		    ((uintptr_t)wqep->send_mem + offset);

		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*rmpp_hdr));

		IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L3, ibmf_i_init_send_wqe,
		    IBMF_TNF_TRACE, "",
		    "ibmf_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
		    " next_seg = %d, num_pkts = %d\n",
		    tnf_opaque, msgimplp, msgimplp,
		    tnf_opaque, rmpp_type, rmpp_ctx->rmpp_type,
		    tnf_opaque, next_seg, rmpp_ctx->rmpp_ns,
		    tnf_opaque, num_pkts, rmpp_ctx->rmpp_num_pkts);

		/*
		 * Initialize the RMPP header
		 */
		rmpp_ctx->rmpp_flags = IBMF_RMPP_FLAGS_ACTIVE;

		/* first, last packet flags set only for type DATA */
		if (rmpp_ctx->rmpp_type == IBMF_RMPP_TYPE_DATA) {

			if (rmpp_ctx->rmpp_ns == 1)
				rmpp_ctx->rmpp_flags |=
				    IBMF_RMPP_FLAGS_FIRST_PKT;
			else
				rmpp_ctx->rmpp_respt = IBMF_RMPP_DEFAULT_RRESPT;

			if (rmpp_ctx->rmpp_ns == rmpp_ctx->rmpp_num_pkts)
				rmpp_ctx->rmpp_flags |=
				    IBMF_RMPP_FLAGS_LAST_PKT;
		} else {
			data_sz = 0;
			rmpp_ctx->rmpp_respt = IBMF_RMPP_TERM_RRESPT;
		}

		IBMF_INIT_RMPP_HDR(rmpp_hdr,
		    IBMF_RMPP_VERSION, rmpp_ctx->rmpp_type,
		    rmpp_ctx->rmpp_respt, rmpp_ctx->rmpp_flags,
		    rmpp_ctx->rmpp_status, rmpp_ctx->rmpp_word3,
		    rmpp_ctx->rmpp_word4)

		IBMF_TRACE_5(IBMF_TNF_DEBUG, DPRINT_L3, ibmf_i_init_send_wqe,
		    IBMF_TNF_TRACE, "",
		    "ibmf_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
		    " rmpp_flags = 0x%x, rmpp_segnum = %d, pyld_nwl = %d\n",
		    tnf_opaque, msgimplp, msgimplp,
		    tnf_opaque, rmpp_type, rmpp_hdr->rmpp_type,
		    tnf_opaque, rmpp_flags, rmpp_hdr->rmpp_flags,
		    tnf_opaque, rmpp_segnum, b2h32(rmpp_hdr->rmpp_segnum),
		    tnf_opaque, pyld_nwl, b2h32(rmpp_hdr->rmpp_pyldlen_nwl));

		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(rmpp_hdr));
	}

	/* determine offset to start class header */
	ibmf_i_mgt_class_to_hdr_sz_off(
	    hdr_ipbufs->im_bufs_mad_hdr->MgmtClass,
	    &cl_hdr_sz, &cl_hdr_off);
	offset += cl_hdr_off;
	if (hdr_ipbufs->im_bufs_cl_hdr != NULL) {
		bcopy((void *)hdr_ipbufs->im_bufs_cl_hdr,
		    (void *)((uintptr_t)wqep->send_mem + offset),
		    hdr_ipbufs->im_bufs_cl_hdr_len);
		offset += hdr_ipbufs->im_bufs_cl_hdr_len;
	}
	bcopy((void *)buf, (void *)((uintptr_t)wqep->send_mem + offset),
	    data_sz);
	swrp->wr_sgl = sglp;
	swrp->wr_nds = 1;
	swrp->wr.ud.udwr_dest = ud_dest;
	wqep->send_port_num = clientp->ic_client_info.port_num;
	wqep->send_qp_handle = ibt_qp_handle;
	wqep->send_ibmf_qp_handle = ibmf_qp_handle;

	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*swrp))

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_send_wqe_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_send_wqe() exit\n");
}

/*
 * ibmf_i_init_recv_wqe():
 *	Initialize a receive WQE
 */
void
ibmf_i_init_recv_wqe(ibmf_qp_t *qpp, ibt_wr_ds_t *sglp,
    ibmf_recv_wqe_t *wqep, ibt_qp_hdl_t ibt_qp_handle,
    ibmf_qp_handle_t ibmf_qp_handle)
{
	ibt_recv_wr_t	*rwrp;

	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_recv_wqe_start,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_recv_wqe() enter, "
	    "qpp = %p, sglp = %p, wqep = %p, qp_hdl = %p\n",
	    tnf_opaque, qpp, qpp, tnf_opaque, sglp, sglp, tnf_opaque,
	    wqep, wqep, tnf_opaque, qp_hdl, ibmf_qp_handle);

	_NOTE(ASSUMING_PROTECTED(*wqep))
	_NOTE(ASSUMING_PROTECTED(*sglp))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*rwrp))

	rwrp = &wqep->recv_wr;

	/*
	 * we set a bit in the WR ID to be able to easily distinguish
	 * between send completions and recv completions
	 */
	IBMF_ADDR_TO_RECV_WR_ID(wqep, rwrp->wr_id);

	IBMF_INIT_SG_ELEMENT(sglp[0], wqep->recv_mem, wqep->recv_sg_lkey,
	    sizeof (ib_grh_t) + IBMF_MAD_SIZE);

	rwrp->wr_sgl = sglp;
	rwrp->wr_nds = IBMF_MAX_RQ_WR_SGL_ELEMENTS;
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
		wqep->recv_port_num = qpp->iq_port_num;
	} else {
		ibmf_alt_qp_t *altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
		wqep->recv_port_num = altqp->isq_port_num;
	}
	wqep->recv_qpp = qpp;
	wqep->recv_qp_handle = ibt_qp_handle;
	wqep->recv_ibmf_qp_handle = ibmf_qp_handle;

	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*rwrp))

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_recv_wqe_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_init_recv_wqe() exit\n");
}

/*
 * ibmf_i_extend_wqe_cache():
 *	Extend the kmem WQE cache
 */
int
ibmf_i_extend_wqe_cache(ibmf_ci_t *cip, ibmf_qp_handle_t ibmf_qp_handle,
    boolean_t block)
{
	ibmf_wqe_mgt_t	*wqe_mgt;

	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_extend_wqe_cache_start, IBMF_TNF_TRACE, "",
	    "ibmf_i_extend_wqe_cache() enter, cip = %p, qp_hdl = %p, "
	    " block = %d\n", tnf_opaque, cip, cip, tnf_opaque, qp_hdl,
	    ibmf_qp_handle, tnf_uint, block, block);

	/*
	 * Allocate memory for the WQE management structure
	 */
	wqe_mgt = kmem_zalloc(sizeof (ibmf_wqe_mgt_t),
	    (block == B_TRUE ? KM_SLEEP : KM_NOSLEEP));
	if (wqe_mgt == NULL) {
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_extend_wqe_cache_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_extend_wqe_cache(): %s\n",
		    tnf_string, msg, "wqe mgt alloc failed");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_extend_wqe_cache() exit\n");
		return (IBMF_NO_RESOURCES);
	}
	mutex_init(&wqe_mgt->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgt))

	/* Allocate and register more WQE memory */
	if (ibmf_i_extend_wqe_mem(cip, ibmf_qp_handle, wqe_mgt,
	    block) != IBMF_SUCCESS) {
		mutex_destroy(&wqe_mgt->wqes_mutex);
		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_extend_wqe_cache_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_extend_wqe_cache(): %s\n",
		    tnf_string, msg, "extension of WQE pool failed");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_extend_wqe_cache() exit\n");
		return (IBMF_NO_RESOURCES);
	}

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
	    "ibmf_i_extend_wqe_cache() exit\n");

	return (IBMF_SUCCESS);
}

/*
 * ibmf_i_extend_wqe_mem():
 *	Allocate and register more WQE memory, and expand the VMEM arena
 */
static int
ibmf_i_extend_wqe_mem(ibmf_ci_t *cip, ibmf_qp_handle_t ibmf_qp_handle,
    ibmf_wqe_mgt_t *wqe_mgt, boolean_t block)
{
	ibt_status_t		status;
	ibt_mr_hdl_t		mem_hdl;
	ibt_mr_desc_t		mem_desc;
	ibt_mr_attr_t		mem_attr;
	ibmf_alt_qp_t		*qp_ctx;
	ibmf_wqe_mgt_t		*pwqe_mgt;
	vmem_t			*wqe_vmem_arena;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgt))

	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_extend_wqe_mem_start, IBMF_TNF_TRACE, "",
	    "ibmf_i_extend_wqe_mem() enter, cip = %p, qp_hdl = %p, "
	    "wqe_mgt = %p, block = %d\n",
	    tnf_opaque, cip, cip, tnf_opaque, qp_hdl, ibmf_qp_handle,
	    tnf_opaque, wqe_mgt, wqe_mgt, tnf_uint, block, block);

	/*
	 * Allocate more memory for the WQEs to be used by the
	 * specified QP
	 */
	wqe_mgt->wqes_kmem_sz = cip->ci_nports * 2 *
	    ((IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port));
	wqe_mgt->wqes_kmem = kmem_zalloc(wqe_mgt->wqes_kmem_sz,
	    (block == B_TRUE ? KM_SLEEP : KM_NOSLEEP));
	if (wqe_mgt->wqes_kmem == NULL) {
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_extend_wqe_mem(): %s\n",
		    tnf_string, msg, "extension of WQE pool failed");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_extend_wqe_mem() exit\n");
		return (IBMF_NO_RESOURCES);
	}

	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgt->wqes_kmem;
	mem_attr.mr_len = wqe_mgt->wqes_kmem_sz;
	mem_attr.mr_flags = (block == B_TRUE ? IBT_MR_SLEEP : IBT_MR_NOSLEEP)
	    | IBT_MR_ENABLE_LOCAL_WRITE;
	mem_attr.mr_as = NULL;

	/* Register the allocated memory */
	status = ibt_register_mr(cip->ci_ci_handle, cip->ci_pd,
	    &mem_attr, &mem_hdl, &mem_desc);
	if (status != IBT_SUCCESS) {
		kmem_free(wqe_mgt->wqes_kmem, wqe_mgt->wqes_kmem_sz);
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_extend_wqe_mem(): %s\n",
		    tnf_string, msg, "wqe extension MR failed");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_extend_wqe_mem() exit\n");
		return (IBMF_NO_RESOURCES);
	}

	/* Store the memory registration information */
	wqe_mgt->wqes_ib_mem = mem_desc.md_vaddr;
	wqe_mgt->wqes_ib_lkey = mem_desc.md_lkey;
	wqe_mgt->wqes_ib_mem_hdl = mem_hdl;

	/* Get the VMEM arena based on the QP type */
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
		wqe_vmem_arena = cip->ci_wqe_ib_vmem;
	} else {
		qp_ctx = (ibmf_alt_qp_t *)ibmf_qp_handle;
		wqe_vmem_arena = qp_ctx->isq_wqe_ib_vmem;
	}

	/* Add these addresses to the vmem arena */
	if (vmem_add(wqe_vmem_arena, (void *)(uintptr_t)wqe_mgt->wqes_ib_mem,
	    wqe_mgt->wqes_kmem_sz,
	    (block == B_TRUE ? VM_SLEEP : VM_NOSLEEP)) == NULL) {
		(void) ibt_deregister_mr(cip->ci_ci_handle,
		    wqe_mgt->wqes_ib_mem_hdl);
		kmem_free(wqe_mgt->wqes_kmem, wqe_mgt->wqes_kmem_sz);
		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
		    "ibmf_i_extend_wqe_mem(): %s\n",
		    tnf_string, msg, "wqe extension vmem_add failed");
		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
		    "ibmf_i_extend_wqe_mem() exit\n");
		return (IBMF_NO_RESOURCES);
	}

	/* Get the WQE management pointers based on the QP type */
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
		mutex_enter(&cip->ci_wqe_mutex);
		pwqe_mgt = cip->ci_wqe_mgt_list;

		/* Add the new wqe management struct to the end of the list */
		while (pwqe_mgt->wqe_mgt_next != NULL)
			pwqe_mgt = pwqe_mgt->wqe_mgt_next;
		pwqe_mgt->wqe_mgt_next = wqe_mgt;

		mutex_exit(&cip->ci_wqe_mutex);
	} else {
		mutex_enter(&qp_ctx->isq_wqe_mutex);
		pwqe_mgt = qp_ctx->isq_wqe_mgt_list;

		/* Add the new wqe management struct to the end of the list */
		while (pwqe_mgt->wqe_mgt_next != NULL)
			pwqe_mgt = pwqe_mgt->wqe_mgt_next;
		pwqe_mgt->wqe_mgt_next = wqe_mgt;

		mutex_exit(&qp_ctx->isq_wqe_mutex);
	}

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_extend_wqe_mem_end,
	    IBMF_TNF_TRACE, "", "ibmf_i_extend_wqe_mem() exit\n");

	return (IBMF_SUCCESS);
}

/*
 * ibmf_i_alloc_send_resources():
 *	Allocate send resources (the send WQE)
 */
int
ibmf_i_alloc_send_resources(ibmf_ci_t *cip, ibmf_msg_impl_t *msgimplp,
    boolean_t block, ibmf_send_wqe_t **swqepp)
{
	ibmf_send_wqe_t	*send_wqep;
	struct kmem_cache *kmem_cachep;
	ibmf_qp_handle_t ibmf_qp_handle = msgimplp->im_qp_hdl;
	ibmf_alt_qp_t	*altqp;

	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_alloc_send_resources_start, IBMF_TNF_TRACE, "",
	    "ibmf_i_alloc_send_resources() enter, cip = %p, msg = %p, "
	    " block = %d\n", tnf_opaque, cip, cip, tnf_opaque, msg,
	    msgimplp, tnf_uint, block, block);

	/* Get the WQE kmem cache pointer based on the QP type */
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
		kmem_cachep = cip->ci_send_wqes_cache;
	else {
		altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
		kmem_cachep = altqp->isq_send_wqes_cache;
	}

	/*
	 * Allocate a send WQE from the send WQE kmem cache
	 * Do not block here as we are holding the msgimpl mutex.
	 */
	send_wqep = kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
	if (send_wqep == NULL) {
		/*
		 * Attempt to extend the cache and then retry the
		 * kmem_cache_alloc()
		 * The block argument (third) is set to B_FALSE.
		 */
		if (ibmf_i_extend_wqe_cache(cip, ibmf_qp_handle, B_FALSE) ==
		    IBMF_NO_RESOURCES) {
			mutex_enter(&cip->ci_mutex);
			IBMF_ADD32_PORT_KSTATS(cip, swqe_allocs_failed, 1);
			mutex_exit(&cip->ci_mutex);
			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
			    ibmf_i_alloc_send_resources_err, IBMF_TNF_ERROR, "",
			    "ibmf_i_alloc_send_resources(): %s\n",
			    tnf_string, msg, "alloc send_wqe failed");
			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
			    ibmf_i_alloc_send_resources_end, IBMF_TNF_TRACE, "",
			    "ibmf_i_alloc_send_resources() exit\n");
			return (IBMF_NO_RESOURCES);
		} else {
			send_wqep = kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
			if (send_wqep == NULL) {
				/* Allocation failed again. Give up here. */
				mutex_enter(&cip->ci_mutex);
				IBMF_ADD32_PORT_KSTATS(cip, swqe_allocs_failed,
				    1);
				mutex_exit(&cip->ci_mutex);
				IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
				    ibmf_i_alloc_send_resources_err,
				    IBMF_TNF_ERROR, "",
				    "ibmf_i_alloc_send_resources(): %s\n",
				    tnf_string, msg, "alloc send_wqe failed");
				IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
				    ibmf_i_alloc_send_resources_end,
				    IBMF_TNF_TRACE, "",
				    "ibmf_i_alloc_send_resources() exit\n");
				return (IBMF_NO_RESOURCES);
			}
		}
	}

	mutex_enter(&cip->ci_mutex);
	IBMF_ADD32_PORT_KSTATS(cip, send_wqes_alloced, 1);
	mutex_exit(&cip->ci_mutex);
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
		mutex_enter(&cip->ci_mutex);
		cip->ci_wqes_alloced++;
		mutex_exit(&cip->ci_mutex);
	} else {
		mutex_enter(&altqp->isq_mutex);
		altqp->isq_wqes_alloced++;
		mutex_exit(&altqp->isq_mutex);
	}

	send_wqep->send_msg = msgimplp;
	*swqepp = send_wqep;

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_alloc_send_resources_end, IBMF_TNF_TRACE, "",
	    "ibmf_i_alloc_send_resources() exit\n");

	return (IBMF_SUCCESS);
}

/*
 * ibmf_i_free_send_resources():
 *	Free send resources (just the send WQE)
 */
/* ARGSUSED */
void
ibmf_i_free_send_resources(ibmf_ci_t *cip, ibmf_msg_impl_t *msgimplp,
    ibmf_send_wqe_t *swqep)
{
	struct kmem_cache *kmem_cachep;
	ibmf_qp_handle_t ibmf_qp_handle = msgimplp->im_qp_hdl;
	ibmf_alt_qp_t	*altqp;

	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_free_send_resources_start, IBMF_TNF_TRACE, "",
	    "ibmf_i_free_send_resources() enter, cip = %p, msg = %p, "
	    " swqep = %p\n", tnf_opaque, cip, cip, tnf_opaque, msg,
	    msgimplp, tnf_opaque, swqep, swqep);

	/* Get the WQE kmem cache pointer based on the QP type */
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
		kmem_cachep = cip->ci_send_wqes_cache;
	else {
		altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
		kmem_cachep = altqp->isq_send_wqes_cache;
	}

	/* return the send WQE to the kmem cache */
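	/*
	 * Once the WQE is back in the cache, the outstanding-WQE counters
	 * below are decremented; cv_signal() wakes any thread waiting on the
	 * corresponding condition variable when the count reaches zero.
	 */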
	kmem_cache_free(kmem_cachep, swqep);

	mutex_enter(&cip->ci_mutex);
	IBMF_SUB32_PORT_KSTATS(cip, send_wqes_alloced, 1);
	mutex_exit(&cip->ci_mutex);
	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
		mutex_enter(&cip->ci_mutex);
		cip->ci_wqes_alloced--;
		if (cip->ci_wqes_alloced == 0)
			cv_signal(&cip->ci_wqes_cv);
		mutex_exit(&cip->ci_mutex);
	} else {
		mutex_enter(&altqp->isq_mutex);
		altqp->isq_wqes_alloced--;
		if (altqp->isq_wqes_alloced == 0)
			cv_signal(&altqp->isq_wqes_cv);
		mutex_exit(&altqp->isq_mutex);
	}

	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
	    ibmf_i_free_send_resources_end, IBMF_TNF_TRACE, "",
	    "ibmf_i_free_send_resources() exit\n");
}