1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * hermon_rsrc.c 29 * Hermon Resource Management Routines 30 * 31 * Implements all the routines necessary for setup, teardown, and 32 * alloc/free of all Hermon resources, including those that are managed 33 * by Hermon hardware or which live in Hermon's direct attached DDR memory. 34 */ 35 36 #include <sys/types.h> 37 #include <sys/conf.h> 38 #include <sys/ddi.h> 39 #include <sys/sunddi.h> 40 #include <sys/modctl.h> 41 #include <sys/vmem.h> 42 #include <sys/bitmap.h> 43 44 #include <sys/ib/adapters/hermon/hermon.h> 45 46 int hermon_rsrc_verbose = 0; 47 48 /* 49 * The following routines are used for initializing and destroying 50 * the resource pools used by the Hermon resource allocation routines. 51 * They consist of four classes of object: 52 * 53 * Mailboxes: The "In" and "Out" mailbox types are used by the Hermon 54 * command interface routines. Mailboxes are used to pass information 55 * back and forth to the Hermon firmware. 
 * Either type of mailbox may
 * be allocated from Hermon's direct attached DDR memory or from system
 * memory (although currently all "In" mailboxes are in DDR and all "out"
 * mailboxes come from system memory).
 *
 * HW entry objects: These objects represent resources required by the Hermon
 *	hardware.  These objects include things like Queue Pair contexts (QPC),
 *	Completion Queue contexts (CQC), Event Queue contexts (EQC), RDB (for
 *	supporting RDMA Read/Atomic), Multicast Group entries (MCG), Memory
 *	Protection Table entries (MPT), Memory Translation Table entries (MTT).
 *
 *	What these objects all have in common is that they are each required
 *	to come from ICM memory, they are always allocated from tables, and
 *	they are not to be directly accessed (read or written) by driver
 *	software (Mellanox FMR access to MPT is an exception).
 *	The other notable exceptions are the UAR pages (UAR_PG) which are
 *	allocated from the UAR address space rather than DDR, and the UD
 *	address vectors (UDAV) which are similar to the common object types
 *	with the major difference being that UDAVs _are_ directly read and
 *	written by driver software.
 *
 * SW handle objects: These objects represent resources required by Hermon
 *	driver software.  They are primarily software tracking structures,
 *	which are allocated from system memory (using kmem_cache).  Several of
 *	the objects have both a "constructor" and "destructor" method
 *	associated with them (see below).
 *
 * Protection Domain (PD) handle objects: These objects are very much like
 *	a SW handle object with the notable difference that all PD handle
 *	objects have an actual Protection Domain number (PD) associated with
 *	them (and the PD number is allocated/managed through a separate
 *	vmem_arena specifically set aside for this purpose).
87 */ 88 89 static int hermon_rsrc_mbox_init(hermon_state_t *state, 90 hermon_rsrc_mbox_info_t *info); 91 static void hermon_rsrc_mbox_fini(hermon_state_t *state, 92 hermon_rsrc_mbox_info_t *info); 93 94 static int hermon_rsrc_sw_handles_init(hermon_state_t *state, 95 hermon_rsrc_sw_hdl_info_t *info); 96 static void hermon_rsrc_sw_handles_fini(hermon_state_t *state, 97 hermon_rsrc_sw_hdl_info_t *info); 98 99 static int hermon_rsrc_pd_handles_init(hermon_state_t *state, 100 hermon_rsrc_sw_hdl_info_t *info); 101 static void hermon_rsrc_pd_handles_fini(hermon_state_t *state, 102 hermon_rsrc_sw_hdl_info_t *info); 103 104 /* 105 * The following routines are used for allocating and freeing the specific 106 * types of objects described above from their associated resource pools. 107 */ 108 static int hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info, 109 uint_t num, hermon_rsrc_t *hdl); 110 static void hermon_rsrc_mbox_free(hermon_rsrc_pool_info_t *pool_info, 111 hermon_rsrc_t *hdl); 112 113 static int hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info, 114 uint_t num, uint_t num_align, ddi_acc_handle_t acc_handle, 115 uint_t sleepflag, hermon_rsrc_t *hdl); 116 static void hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info, 117 hermon_rsrc_t *hdl); 118 119 static int hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info, 120 uint_t num, hermon_rsrc_t *hdl); 121 static int hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info, 122 hermon_rsrc_t *hdl); 123 124 static int hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info, 125 uint_t sleepflag, hermon_rsrc_t *hdl); 126 static void hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info, 127 hermon_rsrc_t *hdl); 128 129 static int hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info, 130 uint_t sleepflag, hermon_rsrc_t *hdl); 131 static void hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info, 132 hermon_rsrc_t *hdl); 133 134 /* 135 * The following 
routines are the constructors and destructors for several 136 * of the SW handle type objects. For certain types of SW handles objects 137 * (all of which are implemented using kmem_cache), we need to do some 138 * special field initialization (specifically, mutex_init/destroy). These 139 * routines enable that init and teardown. 140 */ 141 static int hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags); 142 static void hermon_rsrc_pdhdl_destructor(void *pd, void *state); 143 static int hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags); 144 static void hermon_rsrc_cqhdl_destructor(void *cq, void *state); 145 static int hermon_rsrc_qphdl_constructor(void *cq, void *priv, int flags); 146 static void hermon_rsrc_qphdl_destructor(void *cq, void *state); 147 static int hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags); 148 static void hermon_rsrc_srqhdl_destructor(void *srq, void *state); 149 static int hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags); 150 static void hermon_rsrc_refcnt_destructor(void *rc, void *state); 151 static int hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags); 152 static void hermon_rsrc_ahhdl_destructor(void *ah, void *state); 153 static int hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags); 154 static void hermon_rsrc_mrhdl_destructor(void *mr, void *state); 155 156 /* 157 * Special routine to calculate and return the size of a MCG object based 158 * on current driver configuration (specifically, the number of QP per MCG 159 * that has been configured. 160 */ 161 static int hermon_rsrc_mcg_entry_get_size(hermon_state_t *state, 162 uint_t *mcg_size_shift); 163 164 165 /* 166 * hermon_rsrc_alloc() 167 * 168 * Context: Can be called from interrupt or base context. 169 * The "sleepflag" parameter is used by all object allocators to 170 * determine whether to SLEEP for resources or not. 
171 */ 172 int 173 hermon_rsrc_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num, 174 uint_t sleepflag, hermon_rsrc_t **hdl) 175 { 176 hermon_rsrc_pool_info_t *rsrc_pool; 177 hermon_rsrc_t *tmp_rsrc_hdl; 178 int flag, status = DDI_FAILURE; 179 180 ASSERT(state != NULL); 181 ASSERT(hdl != NULL); 182 183 rsrc_pool = &state->hs_rsrc_hdl[rsrc]; 184 ASSERT(rsrc_pool != NULL); 185 186 /* 187 * Allocate space for the object used to track the resource handle 188 */ 189 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP; 190 tmp_rsrc_hdl = (hermon_rsrc_t *)kmem_cache_alloc(state->hs_rsrc_cache, 191 flag); 192 if (tmp_rsrc_hdl == NULL) { 193 return (DDI_FAILURE); 194 } 195 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl)) 196 197 /* 198 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call 199 * to know what type of resource is being freed. 200 */ 201 tmp_rsrc_hdl->rsrc_type = rsrc; 202 203 /* 204 * Depending on resource type, call the appropriate alloc routine 205 */ 206 switch (rsrc_pool->rsrc_type) { 207 case HERMON_IN_MBOX: 208 case HERMON_OUT_MBOX: 209 case HERMON_INTR_IN_MBOX: 210 case HERMON_INTR_OUT_MBOX: 211 status = hermon_rsrc_mbox_alloc(rsrc_pool, num, tmp_rsrc_hdl); 212 break; 213 214 case HERMON_QPC: 215 /* Allocate "num" contiguous/aligned QPCs for RSS */ 216 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, num, 217 0, sleepflag, tmp_rsrc_hdl); 218 break; 219 220 case HERMON_CQC: 221 case HERMON_SRQC: 222 case HERMON_EQC: 223 /* 224 * Because these objects are NOT accessed by Hermon driver 225 * software, we set the acc_handle parameter to zero. 226 */ 227 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1, 0, 228 sleepflag, tmp_rsrc_hdl); 229 break; 230 231 case HERMON_DMPT: 232 /* 233 * Because these objects are sometimes accessed by Hermon 234 * driver software (FMR for MPTs), we need the acc_handle 235 * to be set. The ICM-aware code will set it for all 236 * ICM backed resources. 
237 * But if they are allocated in multiples, we specify here that 238 * they must be aligned on a more restrictive boundary. 239 */ 240 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, num, 241 0, sleepflag, tmp_rsrc_hdl); 242 break; 243 244 case HERMON_MCG: 245 /* 246 * Hermon MCG entries are also NOT accessed by Hermon driver 247 * software, but because MCG entries do not have the same 248 * alignnment restrictions we loosen the constraint here. 249 */ 250 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1, 0, 251 sleepflag, tmp_rsrc_hdl); 252 break; 253 254 case HERMON_MTT: 255 /* 256 * Because MTT objects are among the few HW resources that 257 * may be allocated in odd numbers, we specify a less 258 * restrictive alignment than for the above resources. 259 */ 260 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1, 261 0, sleepflag, tmp_rsrc_hdl); 262 break; 263 264 case HERMON_UARPG: 265 /* 266 * Because UAR pages are written by Hermon driver software (for 267 * doorbells), we set the acc_handle parameter to point to 268 * the ddi_acc_handle_t for the Hermon UAR memory. 
269 */ 270 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1, 271 hermon_rsrc_alloc_uarhdl(state), sleepflag, tmp_rsrc_hdl); 272 break; 273 274 case HERMON_MRHDL: 275 case HERMON_EQHDL: 276 case HERMON_CQHDL: 277 case HERMON_SRQHDL: 278 case HERMON_AHHDL: 279 case HERMON_QPHDL: 280 case HERMON_REFCNT: 281 status = hermon_rsrc_swhdl_alloc(rsrc_pool, sleepflag, 282 tmp_rsrc_hdl); 283 break; 284 285 case HERMON_PDHDL: 286 status = hermon_rsrc_pdhdl_alloc(rsrc_pool, sleepflag, 287 tmp_rsrc_hdl); 288 break; 289 290 case HERMON_RDB: /* handled during HERMON_QPC */ 291 case HERMON_ALTC: /* handled during HERMON_QPC */ 292 case HERMON_AUXC: /* handled during HERMON_QPC */ 293 case HERMON_CMPT_QPC: /* handled during HERMON_QPC */ 294 case HERMON_CMPT_SRQC: /* handled during HERMON_SRQC */ 295 case HERMON_CMPT_CQC: /* handled during HERMON_CPC */ 296 case HERMON_CMPT_EQC: /* handled during HERMON_EPC */ 297 default: 298 HERMON_WARNING(state, "unexpected resource type in alloc "); 299 cmn_err(CE_WARN, "Resource type %x \n", rsrc_pool->rsrc_type); 300 break; 301 } 302 303 /* 304 * If the resource allocation failed, then free the special resource 305 * tracking structure and return failure. Otherwise return the 306 * handle for the resource tracking structure. 307 */ 308 if (status != DDI_SUCCESS) { 309 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl); 310 tmp_rsrc_hdl = NULL; 311 return (DDI_FAILURE); 312 } else { 313 *hdl = tmp_rsrc_hdl; 314 return (DDI_SUCCESS); 315 } 316 } 317 318 319 /* 320 * hermon_rsrc_free() 321 * Context: Can be called from interrupt or base context. 
322 */ 323 void 324 hermon_rsrc_free(hermon_state_t *state, hermon_rsrc_t **hdl) 325 { 326 hermon_rsrc_pool_info_t *rsrc_pool; 327 328 ASSERT(state != NULL); 329 ASSERT(hdl != NULL); 330 331 rsrc_pool = &state->hs_rsrc_hdl[(*hdl)->rsrc_type]; 332 ASSERT(rsrc_pool != NULL); 333 334 /* 335 * Depending on resource type, call the appropriate free routine 336 */ 337 switch (rsrc_pool->rsrc_type) { 338 case HERMON_IN_MBOX: 339 case HERMON_OUT_MBOX: 340 case HERMON_INTR_IN_MBOX: 341 case HERMON_INTR_OUT_MBOX: 342 hermon_rsrc_mbox_free(rsrc_pool, *hdl); 343 break; 344 345 case HERMON_QPC: 346 case HERMON_CQC: 347 case HERMON_SRQC: 348 case HERMON_EQC: 349 case HERMON_DMPT: 350 case HERMON_MCG: 351 case HERMON_MTT: 352 case HERMON_UARPG: 353 hermon_rsrc_hw_entry_free(rsrc_pool, *hdl); 354 break; 355 356 case HERMON_MRHDL: 357 case HERMON_EQHDL: 358 case HERMON_CQHDL: 359 case HERMON_SRQHDL: 360 case HERMON_AHHDL: 361 case HERMON_QPHDL: 362 case HERMON_REFCNT: 363 hermon_rsrc_swhdl_free(rsrc_pool, *hdl); 364 break; 365 366 case HERMON_PDHDL: 367 hermon_rsrc_pdhdl_free(rsrc_pool, *hdl); 368 break; 369 370 case HERMON_RDB: 371 case HERMON_ALTC: 372 case HERMON_AUXC: 373 case HERMON_CMPT_QPC: 374 case HERMON_CMPT_SRQC: 375 case HERMON_CMPT_CQC: 376 case HERMON_CMPT_EQC: 377 default: 378 HERMON_WARNING(state, "unexpected resource type in free"); 379 break; 380 } 381 382 /* 383 * Free the special resource tracking structure, set the handle to 384 * NULL, and return. 385 */ 386 kmem_cache_free(state->hs_rsrc_cache, *hdl); 387 *hdl = NULL; 388 } 389 390 391 /* 392 * hermon_rsrc_init_phase1() 393 * 394 * Completes the first phase of Hermon resource/configuration init. 395 * This involves creating the kmem_cache for the "hermon_rsrc_t" 396 * structs, allocating the space for the resource pool handles, 397 * and setting up the "Out" mailboxes. 
398 * 399 * When this function completes, the Hermon driver is ready to 400 * post the following commands which return information only in the 401 * "Out" mailbox: QUERY_DDR, QUERY_FW, QUERY_DEV_LIM, and QUERY_ADAPTER 402 * If any of these commands are to be posted at this time, they must be 403 * done so only when "spinning" (as the outstanding command list and 404 * EQ setup code has not yet run) 405 * 406 * Context: Only called from attach() path context 407 */ 408 int 409 hermon_rsrc_init_phase1(hermon_state_t *state) 410 { 411 hermon_rsrc_pool_info_t *rsrc_pool; 412 hermon_rsrc_mbox_info_t mbox_info; 413 hermon_rsrc_cleanup_level_t cleanup; 414 hermon_cfg_profile_t *cfgprof; 415 uint64_t num, size; 416 int status; 417 char *rsrc_name; 418 419 ASSERT(state != NULL); 420 421 /* This is where Phase 1 of resource initialization begins */ 422 cleanup = HERMON_RSRC_CLEANUP_LEVEL0; 423 424 /* Build kmem cache name from Hermon instance */ 425 rsrc_name = (char *)kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP); 426 HERMON_RSRC_NAME(rsrc_name, HERMON_RSRC_CACHE); 427 428 /* 429 * Create the kmem_cache for "hermon_rsrc_t" structures 430 * (kmem_cache_create will SLEEP until successful) 431 */ 432 state->hs_rsrc_cache = kmem_cache_create(rsrc_name, 433 sizeof (hermon_rsrc_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 434 435 /* 436 * Allocate an array of hermon_rsrc_pool_info_t's (used in all 437 * subsequent resource allocations) 438 */ 439 state->hs_rsrc_hdl = kmem_zalloc(HERMON_NUM_RESOURCES * 440 sizeof (hermon_rsrc_pool_info_t), KM_SLEEP); 441 442 /* Pull in the configuration profile */ 443 cfgprof = state->hs_cfg_profile; 444 445 /* Initialize the resource pool for "out" mailboxes */ 446 num = ((uint64_t)1 << cfgprof->cp_log_num_outmbox); 447 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size); 448 rsrc_pool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX]; 449 rsrc_pool->rsrc_type = HERMON_OUT_MBOX; 450 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM; 451 rsrc_pool->rsrc_pool_size = (size 
* num); 452 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size; 453 rsrc_pool->rsrc_quantum = (uint_t)size; 454 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN; 455 rsrc_pool->rsrc_state = state; 456 mbox_info.mbi_num = num; 457 mbox_info.mbi_size = size; 458 mbox_info.mbi_rsrcpool = rsrc_pool; 459 status = hermon_rsrc_mbox_init(state, &mbox_info); 460 if (status != DDI_SUCCESS) { 461 hermon_rsrc_fini(state, cleanup); 462 status = DDI_FAILURE; 463 goto rsrcinitp1_fail; 464 } 465 cleanup = HERMON_RSRC_CLEANUP_LEVEL1; 466 467 /* Initialize the mailbox list */ 468 status = hermon_outmbox_list_init(state); 469 if (status != DDI_SUCCESS) { 470 hermon_rsrc_fini(state, cleanup); 471 status = DDI_FAILURE; 472 goto rsrcinitp1_fail; 473 } 474 cleanup = HERMON_RSRC_CLEANUP_LEVEL2; 475 476 /* Initialize the resource pool for "interrupt out" mailboxes */ 477 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_outmbox); 478 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size); 479 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX]; 480 rsrc_pool->rsrc_type = HERMON_INTR_OUT_MBOX; 481 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM; 482 rsrc_pool->rsrc_pool_size = (size * num); 483 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size; 484 rsrc_pool->rsrc_quantum = (uint_t)size; 485 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN; 486 rsrc_pool->rsrc_state = state; 487 mbox_info.mbi_num = num; 488 mbox_info.mbi_size = size; 489 mbox_info.mbi_rsrcpool = rsrc_pool; 490 status = hermon_rsrc_mbox_init(state, &mbox_info); 491 if (status != DDI_SUCCESS) { 492 hermon_rsrc_fini(state, cleanup); 493 status = DDI_FAILURE; 494 goto rsrcinitp1_fail; 495 } 496 cleanup = HERMON_RSRC_CLEANUP_LEVEL3; 497 498 /* Initialize the mailbox list */ 499 status = hermon_intr_outmbox_list_init(state); 500 if (status != DDI_SUCCESS) { 501 hermon_rsrc_fini(state, cleanup); 502 status = DDI_FAILURE; 503 goto rsrcinitp1_fail; 504 } 505 cleanup = HERMON_RSRC_CLEANUP_LEVEL4; 506 507 /* Initialize the resource pool for "in" mailboxes 
*/ 508 num = ((uint64_t)1 << cfgprof->cp_log_num_inmbox); 509 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size); 510 rsrc_pool = &state->hs_rsrc_hdl[HERMON_IN_MBOX]; 511 rsrc_pool->rsrc_type = HERMON_IN_MBOX; 512 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM; 513 rsrc_pool->rsrc_pool_size = (size * num); 514 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size; 515 rsrc_pool->rsrc_quantum = (uint_t)size; 516 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN; 517 rsrc_pool->rsrc_state = state; 518 mbox_info.mbi_num = num; 519 mbox_info.mbi_size = size; 520 mbox_info.mbi_rsrcpool = rsrc_pool; 521 status = hermon_rsrc_mbox_init(state, &mbox_info); 522 if (status != DDI_SUCCESS) { 523 hermon_rsrc_fini(state, cleanup); 524 status = DDI_FAILURE; 525 goto rsrcinitp1_fail; 526 } 527 cleanup = HERMON_RSRC_CLEANUP_LEVEL5; 528 529 /* Initialize the mailbox list */ 530 status = hermon_inmbox_list_init(state); 531 if (status != DDI_SUCCESS) { 532 hermon_rsrc_fini(state, cleanup); 533 status = DDI_FAILURE; 534 goto rsrcinitp1_fail; 535 } 536 cleanup = HERMON_RSRC_CLEANUP_LEVEL6; 537 538 /* Initialize the resource pool for "interrupt in" mailboxes */ 539 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_inmbox); 540 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size); 541 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX]; 542 rsrc_pool->rsrc_type = HERMON_INTR_IN_MBOX; 543 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM; 544 rsrc_pool->rsrc_pool_size = (size * num); 545 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size; 546 rsrc_pool->rsrc_quantum = (uint_t)size; 547 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN; 548 rsrc_pool->rsrc_state = state; 549 mbox_info.mbi_num = num; 550 mbox_info.mbi_size = size; 551 mbox_info.mbi_rsrcpool = rsrc_pool; 552 status = hermon_rsrc_mbox_init(state, &mbox_info); 553 if (status != DDI_SUCCESS) { 554 hermon_rsrc_fini(state, cleanup); 555 status = DDI_FAILURE; 556 goto rsrcinitp1_fail; 557 } 558 cleanup = HERMON_RSRC_CLEANUP_LEVEL7; 559 560 /* Initialize the mailbox 
list */ 561 status = hermon_intr_inmbox_list_init(state); 562 if (status != DDI_SUCCESS) { 563 hermon_rsrc_fini(state, cleanup); 564 status = DDI_FAILURE; 565 goto rsrcinitp1_fail; 566 } 567 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE; 568 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN); 569 return (DDI_SUCCESS); 570 571 rsrcinitp1_fail: 572 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN); 573 return (status); 574 } 575 576 577 /* 578 * hermon_rsrc_init_phase2() 579 * Context: Only called from attach() path context 580 */ 581 int 582 hermon_rsrc_init_phase2(hermon_state_t *state) 583 { 584 hermon_rsrc_sw_hdl_info_t hdl_info; 585 hermon_rsrc_hw_entry_info_t entry_info; 586 hermon_rsrc_pool_info_t *rsrc_pool; 587 hermon_rsrc_cleanup_level_t cleanup, ncleanup; 588 hermon_cfg_profile_t *cfgprof; 589 hermon_hw_querydevlim_t *devlim; 590 uint64_t num, max, num_prealloc; 591 uint_t mcg_size, mcg_size_shift; 592 int i, status; 593 char *rsrc_name; 594 595 ASSERT(state != NULL); 596 597 /* Phase 2 initialization begins where Phase 1 left off */ 598 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE; 599 600 /* Allocate the ICM resource name space */ 601 602 /* Build the ICM vmem arena names from Hermon instance */ 603 rsrc_name = (char *)kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP); 604 605 /* 606 * Initialize the resource pools for all objects that exist in 607 * context memory (ICM). The ICM consists of context tables, each 608 * type of resource (QP, CQ, EQ, etc) having it's own context table 609 * (QPC, CQC, EQC, etc...). 610 */ 611 cfgprof = state->hs_cfg_profile; 612 devlim = &state->hs_devlim; 613 614 /* 615 * Initialize the resource pools for each of the driver resources. 616 * With a few exceptions, these resources fall into the two cateogories 617 * of either hw_entries or sw_entries. 618 */ 619 620 /* 621 * Initialize the resource pools for ICM (hardware) types first. 
622 * These resources are managed through vmem arenas, which are 623 * created via the rsrc pool initialization routine. Note that, 624 * due to further calculations, the MCG resource pool is 625 * initialized seperately. 626 */ 627 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) { 628 629 rsrc_pool = &state->hs_rsrc_hdl[i]; 630 rsrc_pool->rsrc_type = i; 631 632 /* Set the resource-specific attributes */ 633 switch (i) { 634 case HERMON_MTT: 635 max = ((uint64_t)1 << devlim->log_max_mtt); 636 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_mtt); 637 HERMON_RSRC_NAME(rsrc_name, HERMON_MTT_VMEM); 638 ncleanup = HERMON_RSRC_CLEANUP_LEVEL9; 639 break; 640 641 case HERMON_DMPT: 642 max = ((uint64_t)1 << devlim->log_max_dmpt); 643 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_dmpt); 644 HERMON_RSRC_NAME(rsrc_name, HERMON_DMPT_VMEM); 645 ncleanup = HERMON_RSRC_CLEANUP_LEVEL10; 646 break; 647 648 case HERMON_QPC: 649 max = ((uint64_t)1 << devlim->log_max_qp); 650 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_qp); 651 HERMON_RSRC_NAME(rsrc_name, HERMON_QPC_VMEM); 652 ncleanup = HERMON_RSRC_CLEANUP_LEVEL11; 653 break; 654 655 case HERMON_CQC: 656 max = ((uint64_t)1 << devlim->log_max_cq); 657 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_cq); 658 HERMON_RSRC_NAME(rsrc_name, HERMON_CQC_VMEM); 659 ncleanup = HERMON_RSRC_CLEANUP_LEVEL13; 660 break; 661 662 case HERMON_SRQC: 663 max = ((uint64_t)1 << devlim->log_max_srq); 664 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_srq); 665 HERMON_RSRC_NAME(rsrc_name, HERMON_SRQC_VMEM); 666 ncleanup = HERMON_RSRC_CLEANUP_LEVEL16; 667 break; 668 669 case HERMON_EQC: 670 max = ((uint64_t)1 << devlim->log_max_eq); 671 num_prealloc = devlim->num_rsvd_eq; 672 HERMON_RSRC_NAME(rsrc_name, HERMON_EQC_VMEM); 673 ncleanup = HERMON_RSRC_CLEANUP_LEVEL18; 674 break; 675 676 case HERMON_MCG: /* handled below */ 677 case HERMON_AUXC: 678 case HERMON_ALTC: 679 case HERMON_RDB: 680 case HERMON_CMPT_QPC: 681 case HERMON_CMPT_SRQC: 682 case 
HERMON_CMPT_CQC: 683 case HERMON_CMPT_EQC: 684 default: 685 /* We don't need to initialize this rsrc here. */ 686 continue; 687 } 688 689 /* Set the common values for all resource pools */ 690 rsrc_pool->rsrc_state = state; 691 rsrc_pool->rsrc_loc = HERMON_IN_ICM; 692 rsrc_pool->rsrc_pool_size = state->hs_icm[i].table_size; 693 rsrc_pool->rsrc_align = state->hs_icm[i].table_size; 694 rsrc_pool->rsrc_shift = state->hs_icm[i].log_object_size; 695 rsrc_pool->rsrc_quantum = state->hs_icm[i].object_size; 696 697 /* Now, initialize the entry_info and call the init routine */ 698 entry_info.hwi_num = state->hs_icm[i].num_entries; 699 entry_info.hwi_max = max; 700 entry_info.hwi_prealloc = num_prealloc; 701 entry_info.hwi_rsrcpool = rsrc_pool; 702 entry_info.hwi_rsrcname = rsrc_name; 703 status = hermon_rsrc_hw_entries_init(state, &entry_info); 704 if (status != DDI_SUCCESS) { 705 hermon_rsrc_fini(state, cleanup); 706 status = DDI_FAILURE; 707 goto rsrcinitp2_fail; 708 } 709 cleanup = ncleanup; 710 } 711 712 /* 713 * Initialize the Multicast Group (MCG) entries. First, calculate 714 * (and validate) the size of the MCGs. 715 */ 716 status = hermon_rsrc_mcg_entry_get_size(state, &mcg_size_shift); 717 if (status != DDI_SUCCESS) { 718 hermon_rsrc_fini(state, cleanup); 719 status = DDI_FAILURE; 720 goto rsrcinitp2_fail; 721 } 722 mcg_size = HERMON_MCGMEM_SZ(state); 723 724 /* 725 * Initialize the resource pool for the MCG table entries. Notice 726 * that the number of MCGs is configurable. Note also that a certain 727 * number of MCGs must be set aside for Hermon firmware use (they 728 * correspond to the number of MCGs used by the internal hash 729 * function). 
730 */ 731 num = ((uint64_t)1 << cfgprof->cp_log_num_mcg); 732 max = ((uint64_t)1 << devlim->log_max_mcg); 733 num_prealloc = ((uint64_t)1 << cfgprof->cp_log_num_mcg_hash); 734 rsrc_pool = &state->hs_rsrc_hdl[HERMON_MCG]; 735 rsrc_pool->rsrc_type = HERMON_MCG; 736 rsrc_pool->rsrc_loc = HERMON_IN_ICM; 737 rsrc_pool->rsrc_pool_size = (mcg_size * num); 738 rsrc_pool->rsrc_shift = mcg_size_shift; 739 rsrc_pool->rsrc_quantum = mcg_size; 740 rsrc_pool->rsrc_align = (mcg_size * num); 741 rsrc_pool->rsrc_state = state; 742 HERMON_RSRC_NAME(rsrc_name, HERMON_MCG_VMEM); 743 entry_info.hwi_num = num; 744 entry_info.hwi_max = max; 745 entry_info.hwi_prealloc = num_prealloc; 746 entry_info.hwi_rsrcpool = rsrc_pool; 747 entry_info.hwi_rsrcname = rsrc_name; 748 status = hermon_rsrc_hw_entries_init(state, &entry_info); 749 if (status != DDI_SUCCESS) { 750 hermon_rsrc_fini(state, cleanup); 751 status = DDI_FAILURE; 752 goto rsrcinitp2_fail; 753 } 754 cleanup = HERMON_RSRC_CLEANUP_LEVEL19; 755 756 /* 757 * Initialize the full range of ICM for the AUXC resource. 758 * This is done because its size is so small, about 1 byte per QP. 759 */ 760 761 /* 762 * Initialize the Hermon command handling interfaces. This step 763 * sets up the outstanding command tracking mechanism for easy access 764 * and fast allocation (see hermon_cmd.c for more details). 
765 */ 766 status = hermon_outstanding_cmdlist_init(state); 767 if (status != DDI_SUCCESS) { 768 hermon_rsrc_fini(state, cleanup); 769 status = DDI_FAILURE; 770 goto rsrcinitp2_fail; 771 } 772 cleanup = HERMON_RSRC_CLEANUP_LEVEL20; 773 774 /* Initialize the resource pool and vmem arena for the PD handles */ 775 rsrc_pool = &state->hs_rsrc_hdl[HERMON_PDHDL]; 776 rsrc_pool->rsrc_type = HERMON_PDHDL; 777 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM; 778 rsrc_pool->rsrc_quantum = sizeof (struct hermon_sw_pd_s); 779 rsrc_pool->rsrc_state = state; 780 HERMON_RSRC_NAME(rsrc_name, HERMON_PDHDL_CACHE); 781 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_pd); 782 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_pd); 783 hdl_info.swi_rsrcpool = rsrc_pool; 784 hdl_info.swi_constructor = hermon_rsrc_pdhdl_constructor; 785 hdl_info.swi_destructor = hermon_rsrc_pdhdl_destructor; 786 hdl_info.swi_rsrcname = rsrc_name; 787 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT; 788 status = hermon_rsrc_pd_handles_init(state, &hdl_info); 789 if (status != DDI_SUCCESS) { 790 hermon_rsrc_fini(state, cleanup); 791 status = DDI_FAILURE; 792 goto rsrcinitp2_fail; 793 } 794 cleanup = HERMON_RSRC_CLEANUP_LEVEL21; 795 796 /* 797 * Initialize the resource pools for the rest of the software handles. 798 * This includes MR handles, EQ handles, QP handles, etc. These 799 * objects are almost entirely managed using kmem_cache routines, 800 * and do not utilize a vmem arena. 
801 */ 802 for (i = HERMON_NUM_ICM_RESOURCES; i < HERMON_NUM_RESOURCES; i++) { 803 rsrc_pool = &state->hs_rsrc_hdl[i]; 804 805 /* Set the resource-specific attributes */ 806 switch (i) { 807 case HERMON_MRHDL: 808 rsrc_pool->rsrc_type = HERMON_MRHDL; 809 rsrc_pool->rsrc_quantum = 810 sizeof (struct hermon_sw_mr_s); 811 HERMON_RSRC_NAME(rsrc_name, HERMON_MRHDL_CACHE); 812 hdl_info.swi_num = 813 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) + 814 ((uint64_t)1 << cfgprof->cp_log_num_cmpt); 815 hdl_info.swi_max = 816 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) + 817 ((uint64_t)1 << cfgprof->cp_log_num_cmpt); 818 hdl_info.swi_constructor = 819 hermon_rsrc_mrhdl_constructor; 820 hdl_info.swi_destructor = hermon_rsrc_mrhdl_destructor; 821 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT; 822 ncleanup = HERMON_RSRC_CLEANUP_LEVEL22; 823 break; 824 825 case HERMON_EQHDL: 826 rsrc_pool->rsrc_type = HERMON_EQHDL; 827 rsrc_pool->rsrc_quantum = 828 sizeof (struct hermon_sw_eq_s); 829 HERMON_RSRC_NAME(rsrc_name, HERMON_EQHDL_CACHE); 830 hdl_info.swi_num = HERMON_NUM_EQ; 831 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_eq); 832 hdl_info.swi_constructor = NULL; 833 hdl_info.swi_destructor = NULL; 834 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT; 835 ncleanup = HERMON_RSRC_CLEANUP_LEVEL23; 836 break; 837 838 case HERMON_CQHDL: 839 rsrc_pool->rsrc_type = HERMON_CQHDL; 840 rsrc_pool->rsrc_quantum = 841 sizeof (struct hermon_sw_cq_s); 842 HERMON_RSRC_NAME(rsrc_name, HERMON_CQHDL_CACHE); 843 hdl_info.swi_num = 844 (uint64_t)1 << cfgprof->cp_log_num_cq; 845 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_cq; 846 hdl_info.swi_constructor = 847 hermon_rsrc_cqhdl_constructor; 848 hdl_info.swi_destructor = hermon_rsrc_cqhdl_destructor; 849 hdl_info.swi_flags = (HERMON_SWHDL_KMEMCACHE_INIT | 850 HERMON_SWHDL_TABLE_INIT); 851 hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t); 852 ncleanup = HERMON_RSRC_CLEANUP_LEVEL24; 853 break; 854 855 case HERMON_SRQHDL: 856 rsrc_pool->rsrc_type = 
		    HERMON_SRQHDL;
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_srq_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_SRQHDL_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_srq;
			hdl_info.swi_max = (uint64_t)1 << devlim->log_max_srq;
			hdl_info.swi_constructor =
			    hermon_rsrc_srqhdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_srqhdl_destructor;
			hdl_info.swi_flags = (HERMON_SWHDL_KMEMCACHE_INIT |
			    HERMON_SWHDL_TABLE_INIT);
			hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
			/* "ncleanup" records the hermon_rsrc_fini() level */
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL25;
			break;

		case HERMON_AHHDL:
			rsrc_pool->rsrc_type = HERMON_AHHDL;
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_ah_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_AHHDL_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_ah;
			hdl_info.swi_max = HERMON_NUM_AH;
			hdl_info.swi_constructor =
			    hermon_rsrc_ahhdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_ahhdl_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL26;
			break;

		case HERMON_QPHDL:
			rsrc_pool->rsrc_type = HERMON_QPHDL;
			rsrc_pool->rsrc_quantum =
			    sizeof (struct hermon_sw_qp_s);
			HERMON_RSRC_NAME(rsrc_name, HERMON_QPHDL_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_qp;
			hdl_info.swi_max = (uint64_t)1 << devlim->log_max_qp;
			hdl_info.swi_constructor =
			    hermon_rsrc_qphdl_constructor;
			hdl_info.swi_destructor = hermon_rsrc_qphdl_destructor;
			hdl_info.swi_flags = (HERMON_SWHDL_KMEMCACHE_INIT |
			    HERMON_SWHDL_TABLE_INIT);
			hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL27;
			break;

		case HERMON_REFCNT:
			/* Reference counts shadow the dMPT entries 1:1 */
			rsrc_pool->rsrc_type = HERMON_REFCNT;
			rsrc_pool->rsrc_quantum = sizeof (hermon_sw_refcnt_t);
			HERMON_RSRC_NAME(rsrc_name, HERMON_REFCNT_CACHE);
			hdl_info.swi_num =
			    (uint64_t)1 << cfgprof->cp_log_num_dmpt;
			hdl_info.swi_max = (uint64_t)1 << devlim->log_max_dmpt;
			hdl_info.swi_constructor =
			    hermon_rsrc_refcnt_constructor;
			hdl_info.swi_destructor = hermon_rsrc_refcnt_destructor;
			hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
			ncleanup = HERMON_RSRC_CLEANUP_LEVEL28;
			break;

		default:
			/* Not a SW handle resource; skip this index */
			continue;
		}

		/* Set the common values and call the init routine */
		rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
		rsrc_pool->rsrc_state = state;
		hdl_info.swi_rsrcpool = rsrc_pool;
		hdl_info.swi_rsrcname = rsrc_name;
		status = hermon_rsrc_sw_handles_init(state, &hdl_info);
		if (status != DDI_SUCCESS) {
			hermon_rsrc_fini(state, cleanup);
			status = DDI_FAILURE;
			goto rsrcinitp2_fail;
		}
		cleanup = ncleanup;

		/*
		 * For table entries, save away a pointer to the central list
		 * of handle pointers.  These are used to enable fast lookup
		 * of the resources during event processing.
		 */
		switch (i) {
		case HERMON_CQHDL:
			state->hs_cqhdl = hdl_info.swi_table_ptr;
			break;
		case HERMON_QPHDL:
			state->hs_qphdl = hdl_info.swi_table_ptr;
			break;
		case HERMON_SRQHDL:
			state->hs_srqhdl = hdl_info.swi_table_ptr;
			break;
		default:
			break;
		}
	}

	/*
	 * Initialize a resource pool for the MCG handles.  Notice that for
	 * these MCG handles, we are allocating a table of structures (used to
	 * keep track of the MCG entries that are being written to hardware
	 * and to speed up multicast attach/detach operations).
	 */
	hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
	hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_mcg);
	hdl_info.swi_flags = HERMON_SWHDL_TABLE_INIT;
	hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
	status = hermon_rsrc_sw_handles_init(state, &hdl_info);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}
	state->hs_mcghdl = hdl_info.swi_table_ptr;
	cleanup = HERMON_RSRC_CLEANUP_LEVEL29;

	/*
	 * Last, initialize the resource pool for the UAR pages, which contain
	 * the hardware's doorbell registers.  Each process supported in User
	 * Mode is assigned a UAR page.  Also coming from this pool are the
	 * kernel-assigned UAR page, and any hardware-reserved pages.  Note
	 * that the number of UAR pages is configurable, the value must be less
	 * than the maximum value (obtained from the QUERY_DEV_LIM command) or
	 * the initialization will fail.  Note also that we assign the base
	 * address of the UAR BAR to the rsrc_start parameter.
 */
	num = ((uint64_t)1 << cfgprof->cp_log_num_uar);
	max = num;
	num_prealloc = max(devlim->num_rsvd_uar, 128);
	rsrc_pool = &state->hs_rsrc_hdl[HERMON_UARPG];
	rsrc_pool->rsrc_type = HERMON_UARPG;
	rsrc_pool->rsrc_loc = HERMON_IN_UAR;
	rsrc_pool->rsrc_pool_size = (num << PAGESHIFT);
	rsrc_pool->rsrc_shift = PAGESHIFT;
	rsrc_pool->rsrc_quantum = (uint_t)PAGESIZE;
	rsrc_pool->rsrc_align = PAGESIZE;
	rsrc_pool->rsrc_state = state;
	rsrc_pool->rsrc_start = (void *)state->hs_reg_uar_baseaddr;
	HERMON_RSRC_NAME(rsrc_name, HERMON_UAR_PAGE_VMEM_ATTCH);
	entry_info.hwi_num = num;
	entry_info.hwi_max = max;
	entry_info.hwi_prealloc = num_prealloc;
	entry_info.hwi_rsrcpool = rsrc_pool;
	entry_info.hwi_rsrcname = rsrc_name;
	status = hermon_rsrc_hw_entries_init(state, &entry_info);
	if (status != DDI_SUCCESS) {
		hermon_rsrc_fini(state, cleanup);
		status = DDI_FAILURE;
		goto rsrcinitp2_fail;
	}

	cleanup = HERMON_RSRC_CLEANUP_ALL;

	kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
	return (DDI_SUCCESS);

rsrcinitp2_fail:
	kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
	return (status);
}


/*
 * hermon_rsrc_fini()
 *    Context: Only called from attach() and/or detach() path contexts
 *
 * Tears down driver resources in reverse order of their allocation.  The
 * "clean" argument indicates how far initialization got before the caller
 * decided to unwind; each level's case falls through to all lower levels,
 * so passing HERMON_RSRC_CLEANUP_ALL frees everything.
 */
void
hermon_rsrc_fini(hermon_state_t *state, hermon_rsrc_cleanup_level_t clean)
{
	hermon_rsrc_sw_hdl_info_t	hdl_info;
	hermon_rsrc_hw_entry_info_t	entry_info;
	hermon_rsrc_mbox_info_t		mbox_info;
	hermon_cfg_profile_t		*cfgprof;

	ASSERT(state != NULL);

	cfgprof = state->hs_cfg_profile;

	/*
	 * If init code above is shortened up (see comments), then we
	 * need to establish how to safely and simply clean up from any
	 * given failure point.  Flags, maybe...
	 */

	switch (clean) {
	/*
	 * If we add more resources that need to be cleaned up here, we should
	 * ensure that HERMON_RSRC_CLEANUP_ALL is still the first entry (i.e.
	 * corresponds to the last resource allocated).
	 */

	case HERMON_RSRC_CLEANUP_ALL:
	case HERMON_RSRC_CLEANUP_LEVEL31:
		/* Cleanup the UAR page resource pool, first the dbr pages */
		if (state->hs_kern_dbr) {
			hermon_dbr_kern_free(state);
			state->hs_kern_dbr = NULL;
		}

		/* ...and then the UAR page pool itself */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_UARPG];
		hermon_rsrc_hw_entries_fini(state, &entry_info);

		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL30:
		/* Cleanup the central MCG handle pointers list */
		hdl_info.swi_rsrcpool = NULL;
		hdl_info.swi_table_ptr = state->hs_mcghdl;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
		hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL29:
		/* Cleanup the reference count resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_REFCNT];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL28:
		/* Cleanup the QP handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPHDL];
		hdl_info.swi_table_ptr = state->hs_qphdl;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_qp);
		hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */
	case HERMON_RSRC_CLEANUP_LEVEL27:
		/* Cleanup the address handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AHHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL26:
		/* Cleanup the SRQ handle resource pool. */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQHDL];
		hdl_info.swi_table_ptr = state->hs_srqhdl;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_srq);
		hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL25:
		/* Cleanup the CQ handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQHDL];
		hdl_info.swi_table_ptr = state->hs_cqhdl;
		hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_cq);
		hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL24:
		/* Cleanup the EQ handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL23:
		/* Cleanup the MR handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MRHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_sw_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL22:
		/* Cleanup the PD handle resource pool */
		hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_PDHDL];
		hdl_info.swi_table_ptr = NULL;
		hermon_rsrc_pd_handles_fini(state, &hdl_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL21:
		/* Currently unused - FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL20:
		/* Cleanup the outstanding command list */
		hermon_outstanding_cmdlist_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL19:
		/* Cleanup the EQC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL18:
		/* Cleanup the MCG table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MCG];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL17:
		/* Currently Unused - fallthrough */
	case HERMON_RSRC_CLEANUP_LEVEL16:
		/* Cleanup the SRQC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL15:
		/* Cleanup the AUXC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AUXC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL14:
		/* Cleanup the ALTC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_ALTC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL13:
		/* Cleanup the CQC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL12:
		/* Cleanup the RDB table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_RDB];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL11:
		/* Cleanup the QPC table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10EQ:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_EQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10CQ:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_CQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10SRQ:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_SRQC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10QP:
		/* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_QPC];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL10:
		/* Cleanup the dMPT table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_DMPT];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL9:
		/* Cleanup the MTT table resource pool */
		entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MTT];
		hermon_rsrc_hw_entries_fini(state, &entry_info);
		break;

	/*
	 * The cleanup below comes from the "Phase 1" initialization step.
	 * (see hermon_rsrc_init_phase1() above)
	 */
	case HERMON_RSRC_CLEANUP_PHASE1_COMPLETE:
		/* Cleanup the interrupt "In" mailbox list */
		hermon_intr_inmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL7:
		/* Cleanup the interrupt "In" mailbox resource pool */
		mbox_info.mbi_rsrcpool =
		    &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL6:
		/* Cleanup the "In" mailbox list */
		hermon_inmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL5:
		/* Cleanup the "In" mailbox resource pool */
		mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL4:
		/* Cleanup the interrupt "Out" mailbox list */
		hermon_intr_outmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL3:
		/* Cleanup the interrupt "Out" mailbox resource pool */
		mbox_info.mbi_rsrcpool =
		    &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL2:
		/* Cleanup the "Out" mailbox list */
		hermon_outmbox_list_fini(state);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL1:
		/* Cleanup the "Out" mailbox resource pool */
		mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
		hermon_rsrc_mbox_fini(state, &mbox_info);
		/* FALLTHROUGH */

	case HERMON_RSRC_CLEANUP_LEVEL0:
		/* Free the array of hermon_rsrc_pool_info_t's */

		kmem_free(state->hs_rsrc_hdl, HERMON_NUM_RESOURCES *
		    sizeof (hermon_rsrc_pool_info_t));

		kmem_cache_destroy(state->hs_rsrc_cache);
		break;

	default:
		HERMON_WARNING(state, "unexpected resource cleanup level");
		break;
	}
}


1293 /* 1294 * hermon_rsrc_mbox_init() 1295 * Context: Only called from attach() path context 1296 */ 1297 static int 1298 hermon_rsrc_mbox_init(hermon_state_t *state, hermon_rsrc_mbox_info_t *info) 1299 { 1300 hermon_rsrc_pool_info_t *rsrc_pool; 1301 hermon_rsrc_priv_mbox_t *priv; 1302 1303 ASSERT(state != NULL); 1304 ASSERT(info != NULL); 1305 1306 rsrc_pool = info->mbi_rsrcpool; 1307 ASSERT(rsrc_pool != NULL); 1308 1309 /* Allocate and initialize mailbox private structure */ 1310 priv = kmem_zalloc(sizeof (hermon_rsrc_priv_mbox_t), KM_SLEEP); 1311 priv->pmb_dip = state->hs_dip; 1312 priv->pmb_devaccattr = state->hs_reg_accattr; 1313 priv->pmb_xfer_mode = DDI_DMA_CONSISTENT; 1314 1315 /* 1316 * Initialize many of the default DMA attributes. Then set alignment 1317 * and scatter-gather restrictions specific for mailbox memory. 1318 */ 1319 hermon_dma_attr_init(state, &priv->pmb_dmaattr); 1320 priv->pmb_dmaattr.dma_attr_align = HERMON_MBOX_ALIGN; 1321 priv->pmb_dmaattr.dma_attr_sgllen = 1; 1322 priv->pmb_dmaattr.dma_attr_flags = 0; 1323 rsrc_pool->rsrc_private = priv; 1324 1325 ASSERT(rsrc_pool->rsrc_loc == HERMON_IN_SYSMEM); 1326 1327 rsrc_pool->rsrc_start = NULL; 1328 rsrc_pool->rsrc_vmp = NULL; 1329 1330 return (DDI_SUCCESS); 1331 } 1332 1333 1334 /* 1335 * hermon_rsrc_mbox_fini() 1336 * Context: Only called from attach() and/or detach() path contexts 1337 */ 1338 /* ARGSUSED */ 1339 static void 1340 hermon_rsrc_mbox_fini(hermon_state_t *state, hermon_rsrc_mbox_info_t *info) 1341 { 1342 hermon_rsrc_pool_info_t *rsrc_pool; 1343 1344 ASSERT(state != NULL); 1345 ASSERT(info != NULL); 1346 1347 rsrc_pool = info->mbi_rsrcpool; 1348 ASSERT(rsrc_pool != NULL); 1349 1350 /* Free up the private struct */ 1351 kmem_free(rsrc_pool->rsrc_private, sizeof (hermon_rsrc_priv_mbox_t)); 1352 } 1353 1354 1355 /* 1356 * hermon_rsrc_hw_entries_init() 1357 * Context: Only called from attach() path context 1358 */ 1359 int 1360 hermon_rsrc_hw_entries_init(hermon_state_t *state, 1361 
hermon_rsrc_hw_entry_info_t *info) 1362 { 1363 hermon_rsrc_pool_info_t *rsrc_pool; 1364 hermon_rsrc_t *rsvd_rsrc = NULL; 1365 vmem_t *vmp; 1366 uint64_t num_hwentry, max_hwentry, num_prealloc; 1367 int status; 1368 1369 ASSERT(state != NULL); 1370 ASSERT(info != NULL); 1371 1372 rsrc_pool = info->hwi_rsrcpool; 1373 ASSERT(rsrc_pool != NULL); 1374 num_hwentry = info->hwi_num; 1375 max_hwentry = info->hwi_max; 1376 num_prealloc = info->hwi_prealloc; 1377 1378 if (hermon_rsrc_verbose) { 1379 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init: " 1380 "rsrc_type (0x%x) num (%llx) max (0x%llx) prealloc " 1381 "(0x%llx)", rsrc_pool->rsrc_type, (longlong_t)num_hwentry, 1382 (longlong_t)max_hwentry, (longlong_t)num_prealloc); 1383 } 1384 1385 /* Make sure number of HW entries makes sense */ 1386 if (num_hwentry > max_hwentry) { 1387 return (DDI_FAILURE); 1388 } 1389 1390 /* Set this pool's rsrc_start from the initial ICM allocation */ 1391 if (rsrc_pool->rsrc_start == 0) { 1392 1393 /* use a ROUND value that works on both 32 and 64-bit kernels */ 1394 rsrc_pool->rsrc_start = (void *)(uintptr_t)0x10000000; 1395 1396 if (hermon_rsrc_verbose) { 1397 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:" 1398 " rsrc_type (0x%x) rsrc_start set (0x%lx)", 1399 rsrc_pool->rsrc_type, rsrc_pool->rsrc_start); 1400 } 1401 } 1402 1403 /* 1404 * Create new vmem arena for the HW entries table if rsrc_quantum 1405 * is non-zero. Otherwise if rsrc_quantum is zero, then these HW 1406 * entries are not going to be dynamically allocatable (i.e. they 1407 * won't be allocated/freed through hermon_rsrc_alloc/free). This 1408 * latter option is used for both ALTC and CMPT resources which 1409 * are managed by hardware. 
1410 */ 1411 if (rsrc_pool->rsrc_quantum != 0) { 1412 vmp = vmem_create(info->hwi_rsrcname, 1413 (void *)(uintptr_t)rsrc_pool->rsrc_start, 1414 rsrc_pool->rsrc_pool_size, rsrc_pool->rsrc_quantum, 1415 NULL, NULL, NULL, 0, VM_SLEEP); 1416 if (vmp == NULL) { 1417 /* failed to create vmem arena */ 1418 return (DDI_FAILURE); 1419 } 1420 rsrc_pool->rsrc_vmp = vmp; 1421 if (hermon_rsrc_verbose) { 1422 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:" 1423 " rsrc_type (0x%x) created vmem arena for rsrc", 1424 rsrc_pool->rsrc_type); 1425 } 1426 } else { 1427 /* we do not require a vmem arena */ 1428 rsrc_pool->rsrc_vmp = NULL; 1429 if (hermon_rsrc_verbose) { 1430 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:" 1431 " rsrc_type (0x%x) vmem arena not required", 1432 rsrc_pool->rsrc_type); 1433 } 1434 } 1435 1436 /* Allocate hardware reserved resources, if any */ 1437 if (num_prealloc != 0) { 1438 status = hermon_rsrc_alloc(state, rsrc_pool->rsrc_type, 1439 num_prealloc, HERMON_SLEEP, &rsvd_rsrc); 1440 if (status != DDI_SUCCESS) { 1441 /* unable to preallocate the reserved entries */ 1442 if (rsrc_pool->rsrc_vmp != NULL) { 1443 vmem_destroy(rsrc_pool->rsrc_vmp); 1444 } 1445 return (DDI_FAILURE); 1446 } 1447 } 1448 rsrc_pool->rsrc_private = rsvd_rsrc; 1449 1450 return (DDI_SUCCESS); 1451 } 1452 1453 1454 /* 1455 * hermon_rsrc_hw_entries_fini() 1456 * Context: Only called from attach() and/or detach() path contexts 1457 */ 1458 void 1459 hermon_rsrc_hw_entries_fini(hermon_state_t *state, 1460 hermon_rsrc_hw_entry_info_t *info) 1461 { 1462 hermon_rsrc_pool_info_t *rsrc_pool; 1463 hermon_rsrc_t *rsvd_rsrc; 1464 1465 ASSERT(state != NULL); 1466 ASSERT(info != NULL); 1467 1468 rsrc_pool = info->hwi_rsrcpool; 1469 ASSERT(rsrc_pool != NULL); 1470 1471 /* Free up any "reserved" (i.e. 
preallocated) HW entries */ 1472 rsvd_rsrc = (hermon_rsrc_t *)rsrc_pool->rsrc_private; 1473 if (rsvd_rsrc != NULL) { 1474 hermon_rsrc_free(state, &rsvd_rsrc); 1475 } 1476 1477 /* 1478 * If we've actually setup a vmem arena for the HW entries, then 1479 * destroy it now 1480 */ 1481 if (rsrc_pool->rsrc_vmp != NULL) { 1482 vmem_destroy(rsrc_pool->rsrc_vmp); 1483 } 1484 } 1485 1486 1487 /* 1488 * hermon_rsrc_sw_handles_init() 1489 * Context: Only called from attach() path context 1490 */ 1491 /* ARGSUSED */ 1492 static int 1493 hermon_rsrc_sw_handles_init(hermon_state_t *state, 1494 hermon_rsrc_sw_hdl_info_t *info) 1495 { 1496 hermon_rsrc_pool_info_t *rsrc_pool; 1497 uint64_t num_swhdl, max_swhdl, prealloc_sz; 1498 1499 ASSERT(state != NULL); 1500 ASSERT(info != NULL); 1501 1502 rsrc_pool = info->swi_rsrcpool; 1503 ASSERT(rsrc_pool != NULL); 1504 num_swhdl = info->swi_num; 1505 max_swhdl = info->swi_max; 1506 prealloc_sz = info->swi_prealloc_sz; 1507 1508 1509 /* Make sure number of SW handles makes sense */ 1510 if (num_swhdl > max_swhdl) { 1511 return (DDI_FAILURE); 1512 } 1513 1514 /* 1515 * Depending on the flags parameter, create a kmem_cache for some 1516 * number of software handle structures. Note: kmem_cache_create() 1517 * will SLEEP until successful. 
1518 */ 1519 if (info->swi_flags & HERMON_SWHDL_KMEMCACHE_INIT) { 1520 rsrc_pool->rsrc_private = kmem_cache_create( 1521 info->swi_rsrcname, rsrc_pool->rsrc_quantum, 0, 1522 info->swi_constructor, info->swi_destructor, NULL, 1523 rsrc_pool->rsrc_state, NULL, 0); 1524 } 1525 1526 1527 /* Allocate the central list of SW handle pointers */ 1528 if (info->swi_flags & HERMON_SWHDL_TABLE_INIT) { 1529 info->swi_table_ptr = kmem_zalloc(num_swhdl * prealloc_sz, 1530 KM_SLEEP); 1531 } 1532 1533 return (DDI_SUCCESS); 1534 } 1535 1536 1537 /* 1538 * hermon_rsrc_sw_handles_fini() 1539 * Context: Only called from attach() and/or detach() path contexts 1540 */ 1541 /* ARGSUSED */ 1542 static void 1543 hermon_rsrc_sw_handles_fini(hermon_state_t *state, 1544 hermon_rsrc_sw_hdl_info_t *info) 1545 { 1546 hermon_rsrc_pool_info_t *rsrc_pool; 1547 uint64_t num_swhdl, prealloc_sz; 1548 1549 ASSERT(state != NULL); 1550 ASSERT(info != NULL); 1551 1552 rsrc_pool = info->swi_rsrcpool; 1553 num_swhdl = info->swi_num; 1554 prealloc_sz = info->swi_prealloc_sz; 1555 1556 /* 1557 * If a "software handle" kmem_cache exists for this resource, then 1558 * destroy it now 1559 */ 1560 if (rsrc_pool != NULL) { 1561 kmem_cache_destroy(rsrc_pool->rsrc_private); 1562 } 1563 1564 /* Free up this central list of SW handle pointers */ 1565 if (info->swi_table_ptr != NULL) { 1566 kmem_free(info->swi_table_ptr, num_swhdl * prealloc_sz); 1567 } 1568 } 1569 1570 1571 /* 1572 * hermon_rsrc_pd_handles_init() 1573 * Context: Only called from attach() path context 1574 */ 1575 static int 1576 hermon_rsrc_pd_handles_init(hermon_state_t *state, 1577 hermon_rsrc_sw_hdl_info_t *info) 1578 { 1579 hermon_rsrc_pool_info_t *rsrc_pool; 1580 vmem_t *vmp; 1581 char vmem_name[HERMON_RSRC_NAME_MAXLEN]; 1582 int status; 1583 1584 ASSERT(state != NULL); 1585 ASSERT(info != NULL); 1586 1587 rsrc_pool = info->swi_rsrcpool; 1588 ASSERT(rsrc_pool != NULL); 1589 1590 /* Initialize the resource pool for software handle table */ 1591 
status = hermon_rsrc_sw_handles_init(state, info); 1592 if (status != DDI_SUCCESS) { 1593 return (DDI_FAILURE); 1594 } 1595 1596 /* Build vmem arena name from Hermon instance */ 1597 HERMON_RSRC_NAME(vmem_name, HERMON_PDHDL_VMEM); 1598 1599 /* Create new vmem arena for PD numbers */ 1600 vmp = vmem_create(vmem_name, (caddr_t)1, info->swi_num, 1, NULL, 1601 NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 1602 if (vmp == NULL) { 1603 /* Unable to create vmem arena */ 1604 info->swi_table_ptr = NULL; 1605 hermon_rsrc_sw_handles_fini(state, info); 1606 return (DDI_FAILURE); 1607 } 1608 rsrc_pool->rsrc_vmp = vmp; 1609 1610 return (DDI_SUCCESS); 1611 } 1612 1613 1614 /* 1615 * hermon_rsrc_pd_handles_fini() 1616 * Context: Only called from attach() and/or detach() path contexts 1617 */ 1618 static void 1619 hermon_rsrc_pd_handles_fini(hermon_state_t *state, 1620 hermon_rsrc_sw_hdl_info_t *info) 1621 { 1622 hermon_rsrc_pool_info_t *rsrc_pool; 1623 1624 ASSERT(state != NULL); 1625 ASSERT(info != NULL); 1626 1627 rsrc_pool = info->swi_rsrcpool; 1628 1629 /* Destroy the specially created UAR scratch table vmem arena */ 1630 vmem_destroy(rsrc_pool->rsrc_vmp); 1631 1632 /* Destroy the "hermon_sw_pd_t" kmem_cache */ 1633 hermon_rsrc_sw_handles_fini(state, info); 1634 } 1635 1636 1637 /* 1638 * hermon_rsrc_mbox_alloc() 1639 * Context: Only called from attach() path context 1640 */ 1641 static int 1642 hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num, 1643 hermon_rsrc_t *hdl) 1644 { 1645 hermon_rsrc_priv_mbox_t *priv; 1646 caddr_t kaddr; 1647 size_t real_len, temp_len; 1648 int status; 1649 1650 ASSERT(pool_info != NULL); 1651 ASSERT(hdl != NULL); 1652 1653 /* Get the private pointer for the mailboxes */ 1654 priv = pool_info->rsrc_private; 1655 ASSERT(priv != NULL); 1656 1657 /* Allocate a DMA handle for the mailbox */ 1658 status = ddi_dma_alloc_handle(priv->pmb_dip, &priv->pmb_dmaattr, 1659 DDI_DMA_SLEEP, NULL, &hdl->hr_dmahdl); 1660 if (status != DDI_SUCCESS) { 
1661 return (DDI_FAILURE); 1662 } 1663 1664 /* Allocate memory for the mailbox */ 1665 temp_len = (num * pool_info->rsrc_quantum); 1666 status = ddi_dma_mem_alloc(hdl->hr_dmahdl, temp_len, 1667 &priv->pmb_devaccattr, priv->pmb_xfer_mode, DDI_DMA_SLEEP, 1668 NULL, &kaddr, &real_len, &hdl->hr_acchdl); 1669 if (status != DDI_SUCCESS) { 1670 /* No more memory available for mailbox entries */ 1671 ddi_dma_free_handle(&hdl->hr_dmahdl); 1672 return (DDI_FAILURE); 1673 } 1674 1675 hdl->hr_addr = (void *)kaddr; 1676 hdl->hr_len = (uint32_t)real_len; 1677 1678 return (DDI_SUCCESS); 1679 } 1680 1681 1682 /* 1683 * hermon_rsrc_mbox_free() 1684 * Context: Can be called from interrupt or base context. 1685 */ 1686 static void 1687 hermon_rsrc_mbox_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl) 1688 { 1689 ASSERT(pool_info != NULL); 1690 ASSERT(hdl != NULL); 1691 1692 /* Use ddi_dma_mem_free() to free up sys memory for mailbox */ 1693 ddi_dma_mem_free(&hdl->hr_acchdl); 1694 1695 /* Free the DMA handle for the mailbox */ 1696 ddi_dma_free_handle(&hdl->hr_dmahdl); 1697 } 1698 1699 1700 /* 1701 * hermon_rsrc_hw_entry_alloc() 1702 * Context: Can be called from interrupt or base context. 1703 */ 1704 static int 1705 hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num, 1706 uint_t num_align, ddi_acc_handle_t acc_handle, uint_t sleepflag, 1707 hermon_rsrc_t *hdl) 1708 { 1709 void *addr; 1710 uint64_t offset; 1711 uint32_t align; 1712 int status; 1713 int flag; 1714 1715 ASSERT(pool_info != NULL); 1716 ASSERT(hdl != NULL); 1717 1718 /* 1719 * Hermon hardware entries (QPC, CQC, EQC, MPT, etc.) do not 1720 * generally use the acc_handle (because the entries are not 1721 * directly accessed by software). The exception to this rule 1722 * are the MTT entries. 1723 */ 1724 1725 /* 1726 * Use vmem_xalloc() to get a properly aligned pointer (based on 1727 * the number requested) to the HW entry(ies). 
This handles the 1728 * cases (for special QPCs and for RDB entries) where we need more 1729 * than one and need to ensure that they are properly aligned. 1730 */ 1731 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP; 1732 hdl->hr_len = (num * pool_info->rsrc_quantum); 1733 align = (num_align * pool_info->rsrc_quantum); 1734 1735 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len, 1736 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT); 1737 1738 if (addr == NULL) { 1739 /* No more HW entries available */ 1740 return (DDI_FAILURE); 1741 } 1742 1743 hdl->hr_acchdl = acc_handle; 1744 1745 /* Calculate vaddr and HW table index */ 1746 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start; 1747 hdl->hr_addr = addr; 1748 hdl->hr_indx = offset >> pool_info->rsrc_shift; 1749 1750 if (pool_info->rsrc_loc == HERMON_IN_ICM) { 1751 /* confirm ICM is mapped, and allocate if necessary */ 1752 status = hermon_rsrc_hw_entry_icm_confirm(pool_info, num, hdl); 1753 if (status != DDI_SUCCESS) { 1754 return (DDI_FAILURE); 1755 } 1756 hdl->hr_addr = NULL; 1757 } 1758 1759 return (DDI_SUCCESS); 1760 } 1761 1762 1763 1764 /* 1765 * hermon_rsrc_hw_entry_free() 1766 * Context: Can be called from interrupt or base context. 
 */
static void
hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
    hermon_rsrc_t *hdl)
{
	void		*addr;
	uint64_t	offset;
	int		status;

	ASSERT(pool_info != NULL);
	ASSERT(hdl != NULL);

	/* Calculate the allocated address (from the HW table index) */
	offset = hdl->hr_indx << pool_info->rsrc_shift;
	addr = (void *)(uintptr_t)(offset + (uintptr_t)pool_info->rsrc_start);

	/* Use vmem_xfree() to free up the HW table entry */
	vmem_xfree(pool_info->rsrc_vmp, addr, hdl->hr_len);

	if (pool_info->rsrc_loc == HERMON_IN_ICM) {
		/* free ICM references, and free ICM if required */
		status = hermon_rsrc_hw_entry_icm_free(pool_info, hdl);
		if (status != DDI_SUCCESS)
			HERMON_WARNING(pool_info->rsrc_state,
			    "failure in hw_entry_free");
	}
}

/*
 * hermon_rsrc_hw_entry_icm_confirm()
 *    Context: Can be called from interrupt or base context.
 */
static int
hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info, uint_t num,
    hermon_rsrc_t *hdl)
{
	hermon_state_t		*state;
	hermon_icm_table_t	*icm_table;
	uint8_t			*bitmap;
	hermon_dma_info_t	*dma_info;
	hermon_rsrc_type_t	type;
	uint32_t		rindx, span_offset;
	uint32_t		span_avail;
	int			num_backed;
	int			status;
	uint32_t		index1, index2;

	/*
	 * Utility routine responsible for ensuring that there is memory
	 * backing the ICM resources allocated via hermon_rsrc_hw_entry_alloc().
	 * Confirm existing ICM mapping(s) or allocate ICM memory for the
	 * given hardware resources being allocated, and increment the
	 * ICM DMA structure(s) reference count.
	 *
	 * We may be allocating more objects than can fit in a single span,
	 * or more than will fit in the remaining contiguous memory (from
	 * the offset indicated by hdl->hr_indx) in the span in question.
	 * In either of these cases, we'll be breaking up our allocation
	 * into multiple spans.
	 */
	state	  = pool_info->rsrc_state;
	type	  = pool_info->rsrc_type;
	icm_table = &state->hs_icm[type];

	rindx = hdl->hr_indx;
	hermon_index(index1, index2, rindx, icm_table, span_offset);

	if (hermon_rsrc_verbose) {
		IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_confirm: "
		    "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x): ",
		    type, num, hdl->hr_len, index1, index2);
	}

	mutex_enter(&icm_table->icm_table_lock);
	hermon_bitmap(bitmap, dma_info, icm_table, index1);
	while (num) {
#ifndef __lock_lint
		/*
		 * icm_busy serializes span allocation: the lock is dropped
		 * around hermon_icm_alloc() below, so wait here until any
		 * in-progress allocation for this table completes.
		 */
		while (icm_table->icm_busy) {
			cv_wait(&icm_table->icm_table_cv,
			    &icm_table->icm_table_lock);
		}
#endif
		if (!HERMON_BMAP_BIT_ISSET(bitmap, index2)) {
			/* Allocate ICM for this span */
			icm_table->icm_busy = 1;
			mutex_exit(&icm_table->icm_table_lock);
			status = hermon_icm_alloc(state, type, index1, index2);
			mutex_enter(&icm_table->icm_table_lock);
			icm_table->icm_busy = 0;
			cv_broadcast(&icm_table->icm_table_cv);
			if (status != DDI_SUCCESS) {
				goto fail_alloc;
			}
			if (hermon_rsrc_verbose) {
				IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
				    "hw_entry_icm_confirm: ALLOCATED ICM: "
				    "type (0x%x) index (0x%x, 0x%x)",
				    type, index1, index2);
			}
		}

		/*
		 * Mellanox FMR accesses the MPT directly.  We set the
		 * access handle here only for this case
		 */
		if (type == HERMON_DMPT)
			hdl->hr_acchdl = dma_info[index2].acc_hdl;

		/*
		 * We need to increment the refcnt of this span by the
		 * number of objects in this resource allocation that are
		 * backed by this span.  Given that the rsrc allocation is
		 * contiguous, this value will be the number of objects in
		 * the span from 'span_offset' onward, either up to a max
		 * of the total number of objects, or the end of the span.
		 * So, determine the number of objects that can be backed
		 * by this span ('span_avail'), then determine the number
		 * of backed resources.
		 */
		span_avail = icm_table->span - span_offset;
		if (num > span_avail) {
			num_backed = span_avail;
		} else {
			num_backed = num;
		}

		/*
		 * Now that we know 'num_backed', increment the refcnt,
		 * decrement the total number, and set 'span_offset' to
		 * 0 in case we roll over into the next span.
		 */
		dma_info[index2].icm_refcnt += num_backed;
		rindx += num_backed;
		num -= num_backed;

		if (hermon_rsrc_verbose) {
			IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) index "
			    "(0x%x, 0x%x) num_backed (0x%x)",
			    type, index1, index2, num_backed);
			IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) refcnt now "
			    "(0x%x) num_remaining (0x%x)", type,
			    dma_info[index2].icm_refcnt, num);
		}
		if (num == 0)
			break;

		/* Advance to the next span for the remaining objects */
		hermon_index(index1, index2, rindx, icm_table, span_offset);
		hermon_bitmap(bitmap, dma_info, icm_table, index1);
	}
	mutex_exit(&icm_table->icm_table_lock);

	return (DDI_SUCCESS);

fail_alloc:
	/* JBDB */
	if (hermon_rsrc_verbose) {
		IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
		    "hw_entry_icm_confirm: FAILED ICM ALLOC: "
		    "type (0x%x) num remaind (0x%x) index (0x%x, 0x%x)"
		    "refcnt (0x%x)", type, num, index1, index2,
		    icm_table->icm_dma[index1][index2].icm_refcnt);
	}
	IBTF_DPRINTF_L2("hermon", "WARNING: "
	    "unimplemented fail code in hermon_rsrc_hw_entry_icm_alloc\n");

#if needs_work
	/* free refcnt's and any spans we've allocated */
	while (index-- != start) {
		/*
		 * JBDB - This is a bit tricky.  We need to
		 * free refcnt's on any spans that we've
		 * incremented them on, and completely free
		 * spans that we've allocated. How do we do
		 * this here? Does it need to be as involved
		 * as the core of icm_free() below, or can
		 * we leverage breadcrumbs somehow?
		 */
		HERMON_WARNING(state, "unable to allocate ICM memory: "
		    "UNIMPLEMENTED HANDLING!!");
	}
#else
	/*
	 * NOTE(review): refcnts taken on earlier spans in this allocation
	 * are NOT rolled back here (see the unimplemented block above).
	 */
	cmn_err(CE_WARN,
	    "unimplemented fail code in hermon_rsrc_hw_entry_icm_alloc\n");
#endif
	mutex_exit(&icm_table->icm_table_lock);

	HERMON_WARNING(state, "unable to allocate ICM memory");
	return (DDI_FAILURE);
}

/*
 * hermon_rsrc_hw_entry_icm_free()
 *    Context: Can be called from interrupt or base context.
 */
static int
hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
    hermon_rsrc_t *hdl)
{
	hermon_state_t		*state;
	hermon_icm_table_t	*icm_table;
	uint8_t			*bitmap;
	hermon_dma_info_t	*dma_info;
	hermon_rsrc_type_t	type;
	uint32_t		span_offset;
	uint32_t		span_remain;
	int			num_freed;
	int			num;
	uint32_t		index1, index2, rindx;

	/*
	 * Utility routine responsible for freeing references to ICM
	 * DMA spans, and freeing the ICM memory if necessary.
	 *
	 * We may have allocated objects in a single contiguous resource
	 * allocation that reside in a number of spans, at any given
	 * starting offset within a span.  We therefore must determine
	 * where this allocation starts, and then determine if we need
	 * to free objects in more than one span.
1985 */ 1986 state = pool_info->rsrc_state; 1987 type = pool_info->rsrc_type; 1988 icm_table = &state->hs_icm[type]; 1989 1990 rindx = hdl->hr_indx; 1991 hermon_index(index1, index2, rindx, icm_table, span_offset); 1992 hermon_bitmap(bitmap, dma_info, icm_table, index1); 1993 1994 /* determine the number of ICM objects in this allocation */ 1995 num = hdl->hr_len >> pool_info->rsrc_shift; 1996 1997 if (hermon_rsrc_verbose) { 1998 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_free: " 1999 "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x)", 2000 type, num, hdl->hr_len, index1, index2); 2001 } 2002 mutex_enter(&icm_table->icm_table_lock); 2003 while (num) { 2004 /* 2005 * As with the ICM confirm code above, we need to 2006 * decrement the ICM span(s) by the number of 2007 * resources being freed. So, determine the number 2008 * of objects that are backed in this span from 2009 * 'span_offset' onward, and set 'num_freed' to 2010 * the smaller of either that number ('span_remain'), 2011 * or the total number of objects being freed. 2012 */ 2013 span_remain = icm_table->span - span_offset; 2014 if (num > span_remain) { 2015 num_freed = span_remain; 2016 } else { 2017 num_freed = num; 2018 } 2019 2020 /* 2021 * Now that we know 'num_freed', decrement the refcnt, 2022 * decrement the total number, and set 'span_offset' to 2023 * 0 in case we roll over into the next span. 
2024 */ 2025 dma_info[index2].icm_refcnt -= num_freed; 2026 num -= num_freed; 2027 rindx += num_freed; 2028 2029 if (hermon_rsrc_verbose) { 2030 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) index " 2031 "(0x%x, 0x%x) num_freed (0x%x)", type, 2032 index1, index2, num_freed); 2033 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) refcnt now " 2034 "(0x%x) num remaining (0x%x)", type, 2035 icm_table->icm_dma[index1][index2].icm_refcnt, num); 2036 } 2037 2038 #if HERMON_ICM_FREE_ENABLED 2039 /* If we've freed the last object in this span, free it */ 2040 if ((index1 != 0 || index2 != 0) && 2041 (dma_info[index2].icm_refcnt == 0)) { 2042 if (hermon_rsrc_verbose) { 2043 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry" 2044 "_icm_free: freeing ICM type (0x%x) index" 2045 " (0x%x, 0x%x)", type, index1, index2); 2046 } 2047 hermon_icm_free(state, type, index1, index2); 2048 } 2049 #endif 2050 if (num == 0) 2051 break; 2052 2053 hermon_index(index1, index2, rindx, icm_table, span_offset); 2054 hermon_bitmap(bitmap, dma_info, icm_table, index1); 2055 } 2056 mutex_exit(&icm_table->icm_table_lock); 2057 2058 return (DDI_SUCCESS); 2059 } 2060 2061 2062 2063 /* 2064 * hermon_rsrc_swhdl_alloc() 2065 * Context: Can be called from interrupt or base context. 2066 */ 2067 static int 2068 hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag, 2069 hermon_rsrc_t *hdl) 2070 { 2071 void *addr; 2072 int flag; 2073 2074 ASSERT(pool_info != NULL); 2075 ASSERT(hdl != NULL); 2076 2077 /* Allocate the software handle structure */ 2078 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP; 2079 addr = kmem_cache_alloc(pool_info->rsrc_private, flag); 2080 if (addr == NULL) { 2081 return (DDI_FAILURE); 2082 } 2083 hdl->hr_len = pool_info->rsrc_quantum; 2084 hdl->hr_addr = addr; 2085 2086 return (DDI_SUCCESS); 2087 } 2088 2089 2090 /* 2091 * hermon_rsrc_swhdl_free() 2092 * Context: Can be called from interrupt or base context. 
2093 */ 2094 static void 2095 hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl) 2096 { 2097 ASSERT(pool_info != NULL); 2098 ASSERT(hdl != NULL); 2099 2100 /* Free the software handle structure */ 2101 kmem_cache_free(pool_info->rsrc_private, hdl->hr_addr); 2102 } 2103 2104 2105 /* 2106 * hermon_rsrc_pdhdl_alloc() 2107 * Context: Can be called from interrupt or base context. 2108 */ 2109 static int 2110 hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag, 2111 hermon_rsrc_t *hdl) 2112 { 2113 hermon_pdhdl_t addr; 2114 void *tmpaddr; 2115 int flag, status; 2116 2117 ASSERT(pool_info != NULL); 2118 ASSERT(hdl != NULL); 2119 2120 /* Allocate the software handle */ 2121 status = hermon_rsrc_swhdl_alloc(pool_info, sleepflag, hdl); 2122 if (status != DDI_SUCCESS) { 2123 return (DDI_FAILURE); 2124 } 2125 addr = (hermon_pdhdl_t)hdl->hr_addr; 2126 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*addr)) 2127 2128 /* Allocate a PD number for the handle */ 2129 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP; 2130 tmpaddr = vmem_alloc(pool_info->rsrc_vmp, 1, flag); 2131 if (tmpaddr == NULL) { 2132 /* No more PD number entries available */ 2133 hermon_rsrc_swhdl_free(pool_info, hdl); 2134 return (DDI_FAILURE); 2135 } 2136 addr->pd_pdnum = (uint32_t)(uintptr_t)tmpaddr; 2137 addr->pd_rsrcp = hdl; 2138 hdl->hr_indx = addr->pd_pdnum; 2139 2140 return (DDI_SUCCESS); 2141 } 2142 2143 2144 /* 2145 * hermon_rsrc_pdhdl_free() 2146 * Context: Can be called from interrupt or base context. 
2147 */ 2148 static void 2149 hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl) 2150 { 2151 ASSERT(pool_info != NULL); 2152 ASSERT(hdl != NULL); 2153 2154 /* Use vmem_free() to free up the PD number */ 2155 vmem_free(pool_info->rsrc_vmp, (void *)(uintptr_t)hdl->hr_indx, 1); 2156 2157 /* Free the software handle structure */ 2158 hermon_rsrc_swhdl_free(pool_info, hdl); 2159 } 2160 2161 2162 /* 2163 * hermon_rsrc_pdhdl_constructor() 2164 * Context: Can be called from interrupt or base context. 2165 */ 2166 /* ARGSUSED */ 2167 static int 2168 hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags) 2169 { 2170 hermon_pdhdl_t pdhdl; 2171 hermon_state_t *state; 2172 2173 pdhdl = (hermon_pdhdl_t)pd; 2174 state = (hermon_state_t *)priv; 2175 2176 mutex_init(&pdhdl->pd_lock, NULL, MUTEX_DRIVER, 2177 DDI_INTR_PRI(state->hs_intrmsi_pri)); 2178 2179 return (DDI_SUCCESS); 2180 } 2181 2182 2183 /* 2184 * hermon_rsrc_pdhdl_destructor() 2185 * Context: Can be called from interrupt or base context. 2186 */ 2187 /* ARGSUSED */ 2188 static void 2189 hermon_rsrc_pdhdl_destructor(void *pd, void *priv) 2190 { 2191 hermon_pdhdl_t pdhdl; 2192 2193 pdhdl = (hermon_pdhdl_t)pd; 2194 2195 mutex_destroy(&pdhdl->pd_lock); 2196 } 2197 2198 2199 /* 2200 * hermon_rsrc_cqhdl_constructor() 2201 * Context: Can be called from interrupt or base context. 2202 */ 2203 /* ARGSUSED */ 2204 static int 2205 hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags) 2206 { 2207 hermon_cqhdl_t cqhdl; 2208 hermon_state_t *state; 2209 2210 cqhdl = (hermon_cqhdl_t)cq; 2211 state = (hermon_state_t *)priv; 2212 2213 mutex_init(&cqhdl->cq_lock, NULL, MUTEX_DRIVER, 2214 DDI_INTR_PRI(state->hs_intrmsi_pri)); 2215 2216 return (DDI_SUCCESS); 2217 } 2218 2219 2220 /* 2221 * hermon_rsrc_cqhdl_destructor() 2222 * Context: Can be called from interrupt or base context. 
2223 */ 2224 /* ARGSUSED */ 2225 static void 2226 hermon_rsrc_cqhdl_destructor(void *cq, void *priv) 2227 { 2228 hermon_cqhdl_t cqhdl; 2229 2230 cqhdl = (hermon_cqhdl_t)cq; 2231 2232 mutex_destroy(&cqhdl->cq_lock); 2233 } 2234 2235 2236 /* 2237 * hermon_rsrc_qphdl_constructor() 2238 * Context: Can be called from interrupt or base context. 2239 */ 2240 /* ARGSUSED */ 2241 static int 2242 hermon_rsrc_qphdl_constructor(void *qp, void *priv, int flags) 2243 { 2244 hermon_qphdl_t qphdl; 2245 hermon_state_t *state; 2246 2247 qphdl = (hermon_qphdl_t)qp; 2248 state = (hermon_state_t *)priv; 2249 2250 mutex_init(&qphdl->qp_lock, NULL, MUTEX_DRIVER, 2251 DDI_INTR_PRI(state->hs_intrmsi_pri)); 2252 2253 return (DDI_SUCCESS); 2254 } 2255 2256 2257 /* 2258 * hermon_rsrc_qphdl_destructor() 2259 * Context: Can be called from interrupt or base context. 2260 */ 2261 /* ARGSUSED */ 2262 static void 2263 hermon_rsrc_qphdl_destructor(void *qp, void *priv) 2264 { 2265 hermon_qphdl_t qphdl; 2266 2267 qphdl = (hermon_qphdl_t)qp; 2268 2269 mutex_destroy(&qphdl->qp_lock); 2270 } 2271 2272 2273 /* 2274 * hermon_rsrc_srqhdl_constructor() 2275 * Context: Can be called from interrupt or base context. 2276 */ 2277 /* ARGSUSED */ 2278 static int 2279 hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags) 2280 { 2281 hermon_srqhdl_t srqhdl; 2282 hermon_state_t *state; 2283 2284 srqhdl = (hermon_srqhdl_t)srq; 2285 state = (hermon_state_t *)priv; 2286 2287 mutex_init(&srqhdl->srq_lock, NULL, MUTEX_DRIVER, 2288 DDI_INTR_PRI(state->hs_intrmsi_pri)); 2289 2290 return (DDI_SUCCESS); 2291 } 2292 2293 2294 /* 2295 * hermon_rsrc_srqhdl_destructor() 2296 * Context: Can be called from interrupt or base context. 
2297 */ 2298 /* ARGSUSED */ 2299 static void 2300 hermon_rsrc_srqhdl_destructor(void *srq, void *priv) 2301 { 2302 hermon_srqhdl_t srqhdl; 2303 2304 srqhdl = (hermon_srqhdl_t)srq; 2305 2306 mutex_destroy(&srqhdl->srq_lock); 2307 } 2308 2309 2310 /* 2311 * hermon_rsrc_refcnt_constructor() 2312 * Context: Can be called from interrupt or base context. 2313 */ 2314 /* ARGSUSED */ 2315 static int 2316 hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags) 2317 { 2318 hermon_sw_refcnt_t *refcnt; 2319 hermon_state_t *state; 2320 2321 refcnt = (hermon_sw_refcnt_t *)rc; 2322 state = (hermon_state_t *)priv; 2323 2324 mutex_init(&refcnt->swrc_lock, NULL, MUTEX_DRIVER, 2325 DDI_INTR_PRI(state->hs_intrmsi_pri)); 2326 2327 return (DDI_SUCCESS); 2328 } 2329 2330 2331 /* 2332 * hermon_rsrc_refcnt_destructor() 2333 * Context: Can be called from interrupt or base context. 2334 */ 2335 /* ARGSUSED */ 2336 static void 2337 hermon_rsrc_refcnt_destructor(void *rc, void *priv) 2338 { 2339 hermon_sw_refcnt_t *refcnt; 2340 2341 refcnt = (hermon_sw_refcnt_t *)rc; 2342 2343 mutex_destroy(&refcnt->swrc_lock); 2344 } 2345 2346 2347 /* 2348 * hermon_rsrc_ahhdl_constructor() 2349 * Context: Can be called from interrupt or base context. 2350 */ 2351 /* ARGSUSED */ 2352 static int 2353 hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags) 2354 { 2355 hermon_ahhdl_t ahhdl; 2356 hermon_state_t *state; 2357 2358 ahhdl = (hermon_ahhdl_t)ah; 2359 state = (hermon_state_t *)priv; 2360 2361 mutex_init(&ahhdl->ah_lock, NULL, MUTEX_DRIVER, 2362 DDI_INTR_PRI(state->hs_intrmsi_pri)); 2363 return (DDI_SUCCESS); 2364 } 2365 2366 2367 /* 2368 * hermon_rsrc_ahhdl_destructor() 2369 * Context: Can be called from interrupt or base context. 
2370 */ 2371 /* ARGSUSED */ 2372 static void 2373 hermon_rsrc_ahhdl_destructor(void *ah, void *priv) 2374 { 2375 hermon_ahhdl_t ahhdl; 2376 2377 ahhdl = (hermon_ahhdl_t)ah; 2378 2379 mutex_destroy(&ahhdl->ah_lock); 2380 } 2381 2382 2383 /* 2384 * hermon_rsrc_mrhdl_constructor() 2385 * Context: Can be called from interrupt or base context. 2386 */ 2387 /* ARGSUSED */ 2388 static int 2389 hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags) 2390 { 2391 hermon_mrhdl_t mrhdl; 2392 hermon_state_t *state; 2393 2394 mrhdl = (hermon_mrhdl_t)mr; 2395 state = (hermon_state_t *)priv; 2396 2397 mutex_init(&mrhdl->mr_lock, NULL, MUTEX_DRIVER, 2398 DDI_INTR_PRI(state->hs_intrmsi_pri)); 2399 2400 return (DDI_SUCCESS); 2401 } 2402 2403 2404 /* 2405 * hermon_rsrc_mrhdl_destructor() 2406 * Context: Can be called from interrupt or base context. 2407 */ 2408 /* ARGSUSED */ 2409 static void 2410 hermon_rsrc_mrhdl_destructor(void *mr, void *priv) 2411 { 2412 hermon_mrhdl_t mrhdl; 2413 2414 mrhdl = (hermon_mrhdl_t)mr; 2415 2416 mutex_destroy(&mrhdl->mr_lock); 2417 } 2418 2419 2420 /* 2421 * hermon_rsrc_mcg_entry_get_size() 2422 */ 2423 static int 2424 hermon_rsrc_mcg_entry_get_size(hermon_state_t *state, uint_t *mcg_size_shift) 2425 { 2426 uint_t num_qp_per_mcg, max_qp_per_mcg, log2; 2427 2428 /* 2429 * Round the configured number of QP per MCG to next larger 2430 * power-of-2 size and update. 
2431 */ 2432 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg + 8; 2433 log2 = highbit(num_qp_per_mcg); 2434 if ((num_qp_per_mcg & (num_qp_per_mcg - 1)) == 0) { 2435 log2 = log2 - 1; 2436 } 2437 state->hs_cfg_profile->cp_num_qp_per_mcg = (1 << log2) - 8; 2438 2439 /* Now make sure number of QP per MCG makes sense */ 2440 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg; 2441 max_qp_per_mcg = (1 << state->hs_devlim.log_max_qp_mcg); 2442 if (num_qp_per_mcg > max_qp_per_mcg) { 2443 return (DDI_FAILURE); 2444 } 2445 2446 /* Return the (shift) size of an individual MCG HW entry */ 2447 *mcg_size_shift = log2 + 2; 2448 2449 return (DDI_SUCCESS); 2450 } 2451