1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * tavor_ci.c 29 * Tavor Channel Interface (CI) Routines 30 * 31 * Implements all the routines necessary to interface with the IBTF. 32 * Pointers to all of these functions are passed to the IBTF at attach() 33 * time in the ibc_operations_t structure. These functions include all 34 * of the necessary routines to implement the required InfiniBand "verbs" 35 * and additional IBTF-specific interfaces. 
36 */ 37 38 #include <sys/types.h> 39 #include <sys/conf.h> 40 #include <sys/ddi.h> 41 #include <sys/sunddi.h> 42 43 #include <sys/ib/adapters/tavor/tavor.h> 44 45 /* HCA and port related operations */ 46 static ibt_status_t tavor_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t, 47 ibt_hca_portinfo_t *); 48 static ibt_status_t tavor_ci_modify_ports(ibc_hca_hdl_t, uint8_t, 49 ibt_port_modify_flags_t, uint8_t); 50 static ibt_status_t tavor_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t); 51 52 /* Protection Domains */ 53 static ibt_status_t tavor_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t, 54 ibc_pd_hdl_t *); 55 static ibt_status_t tavor_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t); 56 57 /* Reliable Datagram Domains */ 58 static ibt_status_t tavor_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t, 59 ibc_rdd_hdl_t *); 60 static ibt_status_t tavor_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t); 61 62 /* Address Handles */ 63 static ibt_status_t tavor_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t, 64 ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *); 65 static ibt_status_t tavor_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t); 66 static ibt_status_t tavor_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t, 67 ibc_pd_hdl_t *, ibt_adds_vect_t *); 68 static ibt_status_t tavor_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t, 69 ibt_adds_vect_t *); 70 71 /* Queue Pairs */ 72 static ibt_status_t tavor_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t, 73 ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *, 74 ibc_qp_hdl_t *); 75 static ibt_status_t tavor_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t, 76 ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *, 77 ibt_chan_sizes_t *, ibc_qp_hdl_t *); 78 static ibt_status_t tavor_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t, 79 ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, 80 ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *); 81 static ibt_status_t tavor_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t, 82 ibc_free_qp_flags_t, ibc_qpn_hdl_t *); 83 
static ibt_status_t tavor_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t); 84 static ibt_status_t tavor_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t, 85 ibt_qp_query_attr_t *); 86 static ibt_status_t tavor_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t, 87 ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *); 88 89 /* Completion Queues */ 90 static ibt_status_t tavor_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t, 91 ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *); 92 static ibt_status_t tavor_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t); 93 static ibt_status_t tavor_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, uint_t *, 94 uint_t *, uint_t *, ibt_cq_handler_id_t *); 95 static ibt_status_t tavor_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 96 uint_t, uint_t *); 97 static ibt_status_t tavor_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 98 uint_t, uint_t, ibt_cq_handler_id_t); 99 static ibt_status_t tavor_ci_alloc_cq_sched(ibc_hca_hdl_t, ibt_cq_sched_flags_t, 100 ibc_cq_handler_attr_t *); 101 static ibt_status_t tavor_ci_free_cq_sched(ibc_hca_hdl_t, ibt_cq_handler_id_t); 102 103 /* EE Contexts */ 104 static ibt_status_t tavor_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t, 105 ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *); 106 static ibt_status_t tavor_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t); 107 static ibt_status_t tavor_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t, 108 ibt_eec_query_attr_t *); 109 static ibt_status_t tavor_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t, 110 ibt_cep_modify_flags_t, ibt_eec_info_t *); 111 112 /* Memory Registration */ 113 static ibt_status_t tavor_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t, 114 ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *); 115 static ibt_status_t tavor_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t, 116 ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *); 117 static ibt_status_t tavor_ci_register_shared_mr(ibc_hca_hdl_t, 118 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *, 119 ibc_mr_hdl_t *, ibt_mr_desc_t *); 120 static 
ibt_status_t tavor_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t); 121 static ibt_status_t tavor_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t, 122 ibt_mr_query_attr_t *); 123 static ibt_status_t tavor_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t, 124 ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, 125 ibt_mr_desc_t *); 126 static ibt_status_t tavor_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t, 127 ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *, 128 ibt_mr_desc_t *); 129 static ibt_status_t tavor_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t); 130 131 /* Memory Windows */ 132 static ibt_status_t tavor_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t, 133 ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *); 134 static ibt_status_t tavor_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t); 135 static ibt_status_t tavor_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t, 136 ibt_mw_query_attr_t *); 137 138 /* Multicast Groups */ 139 static ibt_status_t tavor_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t, 140 ib_gid_t, ib_lid_t); 141 static ibt_status_t tavor_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t, 142 ib_gid_t, ib_lid_t); 143 144 /* Work Request and Completion Processing */ 145 static ibt_status_t tavor_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t, 146 ibt_send_wr_t *, uint_t, uint_t *); 147 static ibt_status_t tavor_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t, 148 ibt_recv_wr_t *, uint_t, uint_t *); 149 static ibt_status_t tavor_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 150 ibt_wc_t *, uint_t, uint_t *); 151 static ibt_status_t tavor_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 152 ibt_cq_notify_flags_t); 153 154 /* CI Object Private Data */ 155 static ibt_status_t tavor_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t, 156 ibt_object_type_t, void *, void *, size_t); 157 158 /* CI Object Private Data */ 159 static ibt_status_t tavor_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t, 160 ibt_object_type_t, void *, void *, size_t); 161 162 /* Shared Receive Queues */ 163 static ibt_status_t 
tavor_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t, 164 ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *, 165 ibt_srq_sizes_t *); 166 static ibt_status_t tavor_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t); 167 static ibt_status_t tavor_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t, 168 ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *); 169 static ibt_status_t tavor_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t, 170 ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *); 171 static ibt_status_t tavor_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t, 172 ibt_recv_wr_t *, uint_t, uint_t *); 173 174 /* Address translation */ 175 static ibt_status_t tavor_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *, 176 void *, uint_t, ibt_phys_buf_t *, uint_t *, size_t *, ib_memlen_t *, 177 ibc_ma_hdl_t *); 178 static ibt_status_t tavor_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t); 179 static ibt_status_t tavor_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *, 180 ibt_all_wr_t *, ibc_mi_hdl_t *); 181 static ibt_status_t tavor_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t); 182 183 /* Allocate L_Key */ 184 static ibt_status_t tavor_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t, 185 ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *); 186 187 /* Physical Register Memory Region */ 188 static ibt_status_t tavor_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t, 189 ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *); 190 static ibt_status_t tavor_ci_reregister_physical_mr(ibc_hca_hdl_t, 191 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, 192 ibt_pmr_desc_t *); 193 194 /* Mellanox FMR */ 195 static ibt_status_t tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 196 ibt_fmr_pool_attr_t *fmr_params, ibc_fmr_pool_hdl_t *fmr_pool); 197 static ibt_status_t tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, 198 ibc_fmr_pool_hdl_t fmr_pool); 199 static ibt_status_t tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, 200 ibc_fmr_pool_hdl_t fmr_pool); 201 static ibt_status_t 
tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
static ibt_status_t tavor_ci_deregister_fmr(ibc_hca_hdl_t hca,
    ibc_mr_hdl_t mr);

/* DMA-able ("IO") memory allocation entry points */
static ibt_status_t tavor_ci_alloc_io_mem(ibc_hca_hdl_t, size_t,
    ibt_mr_flags_t, caddr_t *, ibc_mem_alloc_hdl_t *);
static ibt_status_t tavor_ci_free_io_mem(ibc_hca_hdl_t, ibc_mem_alloc_hdl_t);
static int tavor_mem_alloc(tavor_state_t *, size_t, ibt_mr_flags_t,
    caddr_t *, tavor_mem_alloc_hdl_t *);


/*
 * This ibc_operations_t structure includes pointers to all the entry points
 * provided by the Tavor driver.  This structure is passed to the IBTF at
 * driver attach time, using the ibc_attach() call.
 *
 * NOTE: the entries below are positional initializers; their order must
 * exactly match the member order of ibc_operations_t as declared by the
 * IBTF headers.  Do not reorder or remove entries.
 */
ibc_operations_t tavor_ibc_ops = {
    /* HCA and port related operations */
    tavor_ci_query_hca_ports,
    tavor_ci_modify_ports,
    tavor_ci_modify_system_image,

    /* Protection Domains */
    tavor_ci_alloc_pd,
    tavor_ci_free_pd,

    /* Reliable Datagram Domains (unsupported; entry points return error) */
    tavor_ci_alloc_rdd,
    tavor_ci_free_rdd,

    /* Address Handles */
    tavor_ci_alloc_ah,
    tavor_ci_free_ah,
    tavor_ci_query_ah,
    tavor_ci_modify_ah,

    /* Queue Pairs */
    tavor_ci_alloc_qp,
    tavor_ci_alloc_special_qp,
    tavor_ci_alloc_qp_range,
    tavor_ci_free_qp,
    tavor_ci_release_qpn,
    tavor_ci_query_qp,
    tavor_ci_modify_qp,

    /* Completion Queues */
    tavor_ci_alloc_cq,
    tavor_ci_free_cq,
    tavor_ci_query_cq,
    tavor_ci_resize_cq,
    tavor_ci_modify_cq,
    tavor_ci_alloc_cq_sched,
    tavor_ci_free_cq_sched,

    /* EE Contexts */
    tavor_ci_alloc_eec,
    tavor_ci_free_eec,
    tavor_ci_query_eec,
    tavor_ci_modify_eec,

    /* Memory Registration */
    tavor_ci_register_mr,
    tavor_ci_register_buf,
    tavor_ci_register_shared_mr,
    tavor_ci_deregister_mr,
    tavor_ci_query_mr,
    tavor_ci_reregister_mr,
    tavor_ci_reregister_buf,
    tavor_ci_sync_mr,

    /* Memory Windows */
    tavor_ci_alloc_mw,
    tavor_ci_free_mw,
    tavor_ci_query_mw,

    /* Multicast Groups */
    tavor_ci_attach_mcg,
    tavor_ci_detach_mcg,

    /* Work Request and Completion Processing */
    tavor_ci_post_send,
    tavor_ci_post_recv,
    tavor_ci_poll_cq,
    tavor_ci_notify_cq,

    /* CI Object Mapping Data */
    tavor_ci_ci_data_in,
    tavor_ci_ci_data_out,

    /* Shared Receive Queue */
    tavor_ci_alloc_srq,
    tavor_ci_free_srq,
    tavor_ci_query_srq,
    tavor_ci_modify_srq,
    tavor_ci_post_srq,

    /* Address translation */
    tavor_ci_map_mem_area,
    tavor_ci_unmap_mem_area,
    tavor_ci_map_mem_iov,
    tavor_ci_unmap_mem_iov,

    /* Allocate L_key */
    tavor_ci_alloc_lkey,

    /* Physical Register Memory Region */
    tavor_ci_register_physical_mr,
    tavor_ci_reregister_physical_mr,

    /* Mellanox FMR */
    tavor_ci_create_fmr_pool,
    tavor_ci_destroy_fmr_pool,
    tavor_ci_flush_fmr_pool,
    tavor_ci_register_physical_fmr,
    tavor_ci_deregister_fmr,

    /* dmable memory */
    tavor_ci_alloc_io_mem,
    tavor_ci_free_io_mem
};


/*
 * tavor_ci_query_hca_ports()
 *    Returns HCA port attributes for either one or all of the HCA's ports.
 *    Context: Can be called only from user or kernel context.
329 */ 330 static ibt_status_t 331 tavor_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port, 332 ibt_hca_portinfo_t *info_p) 333 { 334 tavor_state_t *state; 335 uint_t start, end, port; 336 int status, indx; 337 338 TAVOR_TNF_ENTER(tavor_ci_query_hca_ports); 339 340 /* Check for valid HCA handle */ 341 if (hca == NULL) { 342 TNF_PROBE_0(tavor_ci_query_hca_ports_invhca_fail, 343 TAVOR_TNF_ERROR, ""); 344 TAVOR_TNF_EXIT(tavor_ci_query_port); 345 return (IBT_HCA_HDL_INVALID); 346 } 347 348 /* Grab the Tavor softstate pointer */ 349 state = (tavor_state_t *)hca; 350 351 /* 352 * If the specified port is zero, then we are supposed to query all 353 * ports. Otherwise, we query only the port number specified. 354 * Setup the start and end port numbers as appropriate for the loop 355 * below. Note: The first Tavor port is port number one (1). 356 */ 357 if (query_port == 0) { 358 start = 1; 359 end = start + (state->ts_cfg_profile->cp_num_ports - 1); 360 } else { 361 end = start = query_port; 362 } 363 364 /* Query the port(s) */ 365 for (port = start, indx = 0; port <= end; port++, indx++) { 366 status = tavor_port_query(state, port, &info_p[indx]); 367 if (status != DDI_SUCCESS) { 368 TNF_PROBE_1(tavor_port_query_fail, TAVOR_TNF_ERROR, 369 "", tnf_uint, status, status); 370 TAVOR_TNF_EXIT(tavor_ci_query_hca_ports); 371 return (status); 372 } 373 } 374 375 TAVOR_TNF_EXIT(tavor_ci_query_hca_ports); 376 return (IBT_SUCCESS); 377 } 378 379 380 /* 381 * tavor_ci_modify_ports() 382 * Modify HCA port attributes 383 * Context: Can be called only from user or kernel context. 
384 */ 385 static ibt_status_t 386 tavor_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port, 387 ibt_port_modify_flags_t flags, uint8_t init_type) 388 { 389 tavor_state_t *state; 390 int status; 391 392 TAVOR_TNF_ENTER(tavor_ci_modify_ports); 393 394 /* Check for valid HCA handle */ 395 if (hca == NULL) { 396 TNF_PROBE_0(tavor_ci_modify_ports_invhca_fail, 397 TAVOR_TNF_ERROR, ""); 398 TAVOR_TNF_EXIT(tavor_ci_modify_ports); 399 return (IBT_HCA_HDL_INVALID); 400 } 401 402 /* Grab the Tavor softstate pointer */ 403 state = (tavor_state_t *)hca; 404 405 /* Modify the port(s) */ 406 status = tavor_port_modify(state, port, flags, init_type); 407 if (status != DDI_SUCCESS) { 408 TNF_PROBE_1(tavor_ci_modify_ports_fail, 409 TAVOR_TNF_ERROR, "", tnf_uint, status, status); 410 TAVOR_TNF_EXIT(tavor_ci_modify_ports); 411 return (status); 412 } 413 414 TAVOR_TNF_EXIT(tavor_ci_modify_ports); 415 return (IBT_SUCCESS); 416 } 417 418 /* 419 * tavor_ci_modify_system_image() 420 * Modify the System Image GUID 421 * Context: Can be called only from user or kernel context. 422 */ 423 /* ARGSUSED */ 424 static ibt_status_t 425 tavor_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid) 426 { 427 TAVOR_TNF_ENTER(tavor_ci_modify_system_image); 428 429 /* 430 * This is an unsupported interface for the Tavor driver. This 431 * interface is necessary to support modification of the System 432 * Image GUID. Tavor is only capable of modifying this parameter 433 * once (during driver initialization). 434 */ 435 436 TAVOR_TNF_EXIT(tavor_ci_modify_system_image); 437 return (IBT_NOT_SUPPORTED); 438 } 439 440 /* 441 * tavor_ci_alloc_pd() 442 * Allocate a Protection Domain 443 * Context: Can be called only from user or kernel context. 
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
{
    tavor_state_t   *state;
    tavor_pdhdl_t   pdhdl;
    int             status;

    TAVOR_TNF_ENTER(tavor_ci_alloc_pd);

    /* IBTF guarantees a non-NULL result pointer; enforce in debug builds */
    ASSERT(pd_p != NULL);

    /* Check for valid HCA handle */
    if (hca == NULL) {
        TNF_PROBE_0(tavor_ci_alloc_pd_invhca_fail,
            TAVOR_TNF_ERROR, "");
        TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
        return (IBT_HCA_HDL_INVALID);
    }

    /* Grab the Tavor softstate pointer */
    state = (tavor_state_t *)hca;

    /* Allocate the PD (TAVOR_NOSLEEP: do not block for resources) */
    status = tavor_pd_alloc(state, &pdhdl, TAVOR_NOSLEEP);
    if (status != DDI_SUCCESS) {
        TNF_PROBE_1(tavor_ci_alloc_pd_fail, TAVOR_TNF_ERROR, "",
            tnf_uint, status, status);
        TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
        return (status);
    }

    /* Return the Tavor PD handle */
    *pd_p = (ibc_pd_hdl_t)pdhdl;

    TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
    return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_pd()
 *    Free a Protection Domain
 *    Context: Can be called only from user or kernel context
 */
static ibt_status_t
tavor_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
{
    tavor_state_t   *state;
    tavor_pdhdl_t   pdhdl;
    int             status;

    TAVOR_TNF_ENTER(tavor_ci_free_pd);

    /* Check for valid HCA handle */
    if (hca == NULL) {
        TNF_PROBE_0(tavor_ci_free_pd_invhca_fail,
            TAVOR_TNF_ERROR, "");
        TAVOR_TNF_EXIT(tavor_ci_free_pd);
        return (IBT_PD_HDL_INVALID);
    }

    /* Check for valid PD handle pointer */
    if (pd == NULL) {
        TNF_PROBE_0(tavor_ci_free_pd_invpdhdl_fail,
            TAVOR_TNF_ERROR, "");
        TAVOR_TNF_EXIT(tavor_ci_free_pd);
        return (IBT_PD_HDL_INVALID);
    }

    /* Grab the Tavor softstate pointer and PD handle */
    state = (tavor_state_t *)hca;
    pdhdl = (tavor_pdhdl_t)pd;

    /* Free the PD (fails e.g. if the PD is still referenced) */
    status = tavor_pd_free(state, &pdhdl);
    if (status != DDI_SUCCESS) {
        TNF_PROBE_1(tavor_ci_free_pd_fail, TAVOR_TNF_ERROR, "",
            tnf_uint, status, status);
        TAVOR_TNF_EXIT(tavor_ci_free_pd);
        return (status);
    }

    TAVOR_TNF_EXIT(tavor_ci_free_pd);
    return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_rdd()
 *    Allocate a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
    ibc_rdd_hdl_t *rdd_p)
{
    TAVOR_TNF_ENTER(tavor_ci_alloc_rdd);

    /*
     * This is an unsupported interface for the Tavor driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Tavor does not support RD.
     */

    TAVOR_TNF_EXIT(tavor_ci_alloc_rdd);
    return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
    TAVOR_TNF_ENTER(tavor_ci_free_rdd);

    /*
     * This is an unsupported interface for the Tavor driver.  This
     * interface is necessary to support Reliable Datagram (RD)
     * operations.  Tavor does not support RD.
     */

    TAVOR_TNF_EXIT(tavor_ci_free_rdd);
    return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_alloc_ah()
 *    Allocate an Address Handle
 *    Context: Can be called only from user or kernel context.
582 */ 583 /* ARGSUSED */ 584 static ibt_status_t 585 tavor_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd, 586 ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p) 587 { 588 tavor_state_t *state; 589 tavor_ahhdl_t ahhdl; 590 tavor_pdhdl_t pdhdl; 591 int status; 592 593 TAVOR_TNF_ENTER(tavor_ci_alloc_ah); 594 595 /* Check for valid HCA handle */ 596 if (hca == NULL) { 597 TNF_PROBE_0(tavor_ci_alloc_ah_invhca_fail, 598 TAVOR_TNF_ERROR, ""); 599 TAVOR_TNF_EXIT(tavor_ci_alloc_ah); 600 return (IBT_HCA_HDL_INVALID); 601 } 602 603 /* Check for valid PD handle pointer */ 604 if (pd == NULL) { 605 TNF_PROBE_0(tavor_ci_alloc_ah_invpdhdl_fail, 606 TAVOR_TNF_ERROR, ""); 607 TAVOR_TNF_EXIT(tavor_ci_alloc_ah); 608 return (IBT_PD_HDL_INVALID); 609 } 610 611 /* Grab the Tavor softstate pointer and PD handle */ 612 state = (tavor_state_t *)hca; 613 pdhdl = (tavor_pdhdl_t)pd; 614 615 /* Allocate the AH */ 616 status = tavor_ah_alloc(state, pdhdl, attr_p, &ahhdl, TAVOR_NOSLEEP); 617 if (status != DDI_SUCCESS) { 618 TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "", 619 tnf_uint, status, status); 620 TAVOR_TNF_EXIT(tavor_ci_alloc_ah); 621 return (status); 622 } 623 624 /* Return the Tavor AH handle */ 625 *ah_p = (ibc_ah_hdl_t)ahhdl; 626 627 TAVOR_TNF_EXIT(tavor_ci_alloc_ah); 628 return (IBT_SUCCESS); 629 } 630 631 632 /* 633 * tavor_ci_free_ah() 634 * Free an Address Handle 635 * Context: Can be called only from user or kernel context. 
636 */ 637 static ibt_status_t 638 tavor_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah) 639 { 640 tavor_state_t *state; 641 tavor_ahhdl_t ahhdl; 642 int status; 643 644 TAVOR_TNF_ENTER(tavor_ci_free_ah); 645 646 /* Check for valid HCA handle */ 647 if (hca == NULL) { 648 TNF_PROBE_0(tavor_ci_free_ah_invhca_fail, 649 TAVOR_TNF_ERROR, ""); 650 TAVOR_TNF_EXIT(tavor_ci_free_ah); 651 return (IBT_HCA_HDL_INVALID); 652 } 653 654 /* Check for valid address handle pointer */ 655 if (ah == NULL) { 656 TNF_PROBE_0(tavor_ci_free_ah_invahhdl_fail, 657 TAVOR_TNF_ERROR, ""); 658 TAVOR_TNF_EXIT(tavor_ci_free_ah); 659 return (IBT_AH_HDL_INVALID); 660 } 661 662 /* Grab the Tavor softstate pointer and AH handle */ 663 state = (tavor_state_t *)hca; 664 ahhdl = (tavor_ahhdl_t)ah; 665 666 /* Free the AH */ 667 status = tavor_ah_free(state, &ahhdl, TAVOR_NOSLEEP); 668 if (status != DDI_SUCCESS) { 669 TNF_PROBE_1(tavor_ci_free_ah_fail, TAVOR_TNF_ERROR, "", 670 tnf_uint, status, status); 671 TAVOR_TNF_EXIT(tavor_ci_free_ah); 672 return (status); 673 } 674 675 TAVOR_TNF_EXIT(tavor_ci_free_ah); 676 return (IBT_SUCCESS); 677 } 678 679 680 /* 681 * tavor_ci_query_ah() 682 * Return the Address Vector information for a specified Address Handle 683 * Context: Can be called from interrupt or base context. 
684 */ 685 static ibt_status_t 686 tavor_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p, 687 ibt_adds_vect_t *attr_p) 688 { 689 tavor_state_t *state; 690 tavor_ahhdl_t ahhdl; 691 tavor_pdhdl_t pdhdl; 692 int status; 693 694 TAVOR_TNF_ENTER(tavor_ci_query_ah); 695 696 /* Check for valid HCA handle */ 697 if (hca == NULL) { 698 TNF_PROBE_0(tavor_ci_query_ah_invhca_fail, 699 TAVOR_TNF_ERROR, ""); 700 TAVOR_TNF_EXIT(tavor_ci_query_ah); 701 return (IBT_HCA_HDL_INVALID); 702 } 703 704 /* Check for valid address handle pointer */ 705 if (ah == NULL) { 706 TNF_PROBE_0(tavor_ci_query_ah_invahhdl_fail, 707 TAVOR_TNF_ERROR, ""); 708 TAVOR_TNF_EXIT(tavor_ci_query_ah); 709 return (IBT_AH_HDL_INVALID); 710 } 711 712 /* Grab the Tavor softstate pointer and AH handle */ 713 state = (tavor_state_t *)hca; 714 ahhdl = (tavor_ahhdl_t)ah; 715 716 /* Query the AH */ 717 status = tavor_ah_query(state, ahhdl, &pdhdl, attr_p); 718 if (status != DDI_SUCCESS) { 719 TNF_PROBE_1(tavor_ci_query_ah_fail, TAVOR_TNF_ERROR, "", 720 tnf_uint, status, status); 721 TAVOR_TNF_EXIT(tavor_ci_query_ah); 722 return (status); 723 } 724 725 /* Return the Tavor PD handle */ 726 *pd_p = (ibc_pd_hdl_t)pdhdl; 727 728 TAVOR_TNF_EXIT(tavor_ci_query_ah); 729 return (IBT_SUCCESS); 730 } 731 732 733 /* 734 * tavor_ci_modify_ah() 735 * Modify the Address Vector information of a specified Address Handle 736 * Context: Can be called from interrupt or base context. 
737 */ 738 static ibt_status_t 739 tavor_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p) 740 { 741 tavor_state_t *state; 742 tavor_ahhdl_t ahhdl; 743 int status; 744 745 TAVOR_TNF_ENTER(tavor_ci_modify_ah); 746 747 /* Check for valid HCA handle */ 748 if (hca == NULL) { 749 TNF_PROBE_0(tavor_ci_modify_ah_invhca_fail, 750 TAVOR_TNF_ERROR, ""); 751 TAVOR_TNF_EXIT(tavor_ci_modify_ah); 752 return (IBT_HCA_HDL_INVALID); 753 } 754 755 /* Check for valid address handle pointer */ 756 if (ah == NULL) { 757 TNF_PROBE_0(tavor_ci_modify_ah_invahhdl_fail, 758 TAVOR_TNF_ERROR, ""); 759 TAVOR_TNF_EXIT(tavor_ci_modify_ah); 760 return (IBT_AH_HDL_INVALID); 761 } 762 763 /* Grab the Tavor softstate pointer and AH handle */ 764 state = (tavor_state_t *)hca; 765 ahhdl = (tavor_ahhdl_t)ah; 766 767 /* Modify the AH */ 768 status = tavor_ah_modify(state, ahhdl, attr_p); 769 if (status != DDI_SUCCESS) { 770 TNF_PROBE_1(tavor_ci_modify_ah_fail, TAVOR_TNF_ERROR, "", 771 tnf_uint, status, status); 772 TAVOR_TNF_EXIT(tavor_ci_modify_ah); 773 return (status); 774 } 775 776 TAVOR_TNF_EXIT(tavor_ci_modify_ah); 777 return (IBT_SUCCESS); 778 } 779 780 781 /* 782 * tavor_ci_alloc_qp() 783 * Allocate a Queue Pair 784 * Context: Can be called only from user or kernel context. 
785 */ 786 static ibt_status_t 787 tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl, 788 ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p, 789 ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p) 790 { 791 tavor_state_t *state; 792 tavor_qp_info_t qpinfo; 793 tavor_qp_options_t op; 794 int status; 795 796 TAVOR_TNF_ENTER(tavor_ci_alloc_qp); 797 798 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p)) 799 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p)) 800 801 /* Check for valid HCA handle */ 802 if (hca == NULL) { 803 TNF_PROBE_0(tavor_ci_alloc_qp_invhca_fail, 804 TAVOR_TNF_ERROR, ""); 805 TAVOR_TNF_EXIT(tavor_ci_alloc_qp); 806 return (IBT_HCA_HDL_INVALID); 807 } 808 809 /* Grab the Tavor softstate pointer */ 810 state = (tavor_state_t *)hca; 811 812 /* Allocate the QP */ 813 qpinfo.qpi_attrp = attr_p; 814 qpinfo.qpi_type = type; 815 qpinfo.qpi_ibt_qphdl = ibt_qphdl; 816 qpinfo.qpi_queueszp = queue_sizes_p; 817 qpinfo.qpi_qpn = qpn; 818 op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr; 819 status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op); 820 if (status != DDI_SUCCESS) { 821 TNF_PROBE_1(tavor_ci_alloc_qp_fail, TAVOR_TNF_ERROR, "", 822 tnf_uint, status, status); 823 TAVOR_TNF_EXIT(tavor_ci_alloc_qp); 824 return (status); 825 } 826 827 /* Return the Tavor QP handle */ 828 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl; 829 830 TAVOR_TNF_EXIT(tavor_ci_alloc_qp); 831 return (IBT_SUCCESS); 832 } 833 834 835 /* 836 * tavor_ci_alloc_special_qp() 837 * Allocate a Special Queue Pair 838 * Context: Can be called only from user or kernel context. 
 */
static ibt_status_t
tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
    ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_qp_hdl_t *qp_p)
{
    tavor_state_t       *state;
    tavor_qp_info_t     qpinfo;
    tavor_qp_options_t  op;
    int                 status;

    TAVOR_TNF_ENTER(tavor_ci_alloc_special_qp);

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

    /* Check for valid HCA handle */
    if (hca == NULL) {
        TNF_PROBE_0(tavor_ci_alloc_special_qp_invhca_fail,
            TAVOR_TNF_ERROR, "");
        TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
        return (IBT_HCA_HDL_INVALID);
    }

    /* Grab the Tavor softstate pointer */
    state = (tavor_state_t *)hca;

    /*
     * Allocate the Special QP.  Unlike the normal QP path, the port
     * number is part of the request (special QPs are per-port).
     */
    qpinfo.qpi_attrp = attr_p;
    qpinfo.qpi_type = type;
    qpinfo.qpi_port = port;
    qpinfo.qpi_ibt_qphdl = ibt_qphdl;
    qpinfo.qpi_queueszp = queue_sizes_p;
    op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr;
    status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
    if (status != DDI_SUCCESS) {
        TNF_PROBE_1(tavor_ci_alloc_special_qp_fail, TAVOR_TNF_ERROR,
            "", tnf_uint, status, status);
        TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
        return (status);
    }

    /* Return the Tavor QP handle */
    *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

    TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
    return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_qp_range()
 *    Allocate a contiguous range of Queue Pairs.
 *    Not implemented by the Tavor driver; always fails.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
    ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
{
    return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_free_qp()
 *    Free a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
{
    tavor_state_t   *state;
    tavor_qphdl_t   qphdl;
    int             status;

    TAVOR_TNF_ENTER(tavor_ci_free_qp);

    /* Check for valid HCA handle */
    if (hca == NULL) {
        TNF_PROBE_0(tavor_ci_free_qp_invhca_fail,
            TAVOR_TNF_ERROR, "");
        TAVOR_TNF_EXIT(tavor_ci_free_qp);
        return (IBT_HCA_HDL_INVALID);
    }

    /* Check for valid QP handle pointer */
    if (qp == NULL) {
        TNF_PROBE_0(tavor_ci_free_qp_invqphdl_fail,
            TAVOR_TNF_ERROR, "");
        TAVOR_TNF_EXIT(tavor_ci_free_qp);
        return (IBT_QP_HDL_INVALID);
    }

    /* Grab the Tavor softstate pointer and QP handle */
    state = (tavor_state_t *)hca;
    qphdl = (tavor_qphdl_t)qp;

    /*
     * Free the QP.  Depending on free_qp_flags, the QPN may be retained
     * and handed back through qpnh_p (see tavor_ci_release_qpn()).
     */
    status = tavor_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
        TAVOR_NOSLEEP);
    if (status != DDI_SUCCESS) {
        TNF_PROBE_1(tavor_ci_free_qp_fail, TAVOR_TNF_ERROR, "",
            tnf_uint, status, status);
        TAVOR_TNF_EXIT(tavor_ci_free_qp);
        return (status);
    }

    TAVOR_TNF_EXIT(tavor_ci_free_qp);
    return (IBT_SUCCESS);
}


/*
 * tavor_ci_release_qpn()
 *    Release a Queue Pair Number (QPN)
 *    Context: Can be called only from user or kernel context.
955 */ 956 static ibt_status_t 957 tavor_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh) 958 { 959 tavor_state_t *state; 960 tavor_qpn_entry_t *entry; 961 962 TAVOR_TNF_ENTER(tavor_ci_release_qpn); 963 964 /* Check for valid HCA handle */ 965 if (hca == NULL) { 966 TNF_PROBE_0(tavor_ci_release_qpn_invhca_fail, 967 TAVOR_TNF_ERROR, ""); 968 TAVOR_TNF_EXIT(tavor_ci_release_qpn); 969 return (IBT_HCA_HDL_INVALID); 970 } 971 972 /* Check for valid QP handle pointer */ 973 if (qpnh == NULL) { 974 TNF_PROBE_0(tavor_ci_release_qpn_invqpnhdl_fail, 975 TAVOR_TNF_ERROR, ""); 976 TAVOR_TNF_EXIT(tavor_ci_release_qpn); 977 return (IBT_QP_HDL_INVALID); 978 } 979 980 /* Grab the Tavor softstate pointer and QP handle */ 981 state = (tavor_state_t *)hca; 982 entry = (tavor_qpn_entry_t *)qpnh; 983 984 /* Release the QP number */ 985 tavor_qp_release_qpn(state, entry, TAVOR_QPN_RELEASE); 986 987 TAVOR_TNF_EXIT(tavor_ci_release_qpn); 988 return (IBT_SUCCESS); 989 } 990 991 992 /* 993 * tavor_ci_query_qp() 994 * Query a Queue Pair 995 * Context: Can be called from interrupt or base context. 
996 */ 997 static ibt_status_t 998 tavor_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, 999 ibt_qp_query_attr_t *attr_p) 1000 { 1001 tavor_state_t *state; 1002 tavor_qphdl_t qphdl; 1003 int status; 1004 1005 TAVOR_TNF_ENTER(tavor_ci_query_qp); 1006 1007 /* Check for valid HCA handle */ 1008 if (hca == NULL) { 1009 TNF_PROBE_0(tavor_ci_query_qp_invhca_fail, 1010 TAVOR_TNF_ERROR, ""); 1011 TAVOR_TNF_EXIT(tavor_ci_query_qp); 1012 return (IBT_HCA_HDL_INVALID); 1013 } 1014 1015 /* Check for valid QP handle */ 1016 if (qp == NULL) { 1017 TNF_PROBE_0(tavor_ci_query_qp_invqphdl_fail, 1018 TAVOR_TNF_ERROR, ""); 1019 TAVOR_TNF_EXIT(tavor_ci_query_qp); 1020 return (IBT_QP_HDL_INVALID); 1021 } 1022 1023 /* Grab the Tavor softstate pointer and QP handle */ 1024 state = (tavor_state_t *)hca; 1025 qphdl = (tavor_qphdl_t)qp; 1026 1027 /* Query the QP */ 1028 status = tavor_qp_query(state, qphdl, attr_p); 1029 if (status != DDI_SUCCESS) { 1030 TNF_PROBE_1(tavor_ci_query_qp_fail, TAVOR_TNF_ERROR, "", 1031 tnf_uint, status, status); 1032 TAVOR_TNF_EXIT(tavor_ci_query_qp); 1033 return (status); 1034 } 1035 1036 TAVOR_TNF_EXIT(tavor_ci_query_qp); 1037 return (IBT_SUCCESS); 1038 } 1039 1040 1041 /* 1042 * tavor_ci_modify_qp() 1043 * Modify a Queue Pair 1044 * Context: Can be called from interrupt or base context. 
 */
static ibt_status_t
tavor_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
    ibt_queue_sizes_t *actual_sz)
{
	tavor_state_t	*state;
	tavor_qphdl_t	qphdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_modify_qp);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_modify_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle */
	if (qp == NULL) {
		TNF_PROBE_0(tavor_ci_modify_qp_invqphdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and QP handle */
	state = (tavor_state_t *)hca;
	qphdl = (tavor_qphdl_t)qp;

	/*
	 * Modify the QP; the status from tavor_qp_modify() is passed
	 * straight back to the IBTF on failure.
	 */
	status = tavor_qp_modify(state, qphdl, flags, info_p, actual_sz);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_modify_qp_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_modify_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_cq()
 *    Allocate a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
{
	tavor_state_t	*state;
	tavor_cqhdl_t	cqhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/*
	 * Allocate the CQ.  Note: TAVOR_NOSLEEP is used here even though
	 * this entry point is documented as user/kernel context only.
	 */
	status = tavor_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
	    &cqhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_cq_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
		return (status);
	}

	/* Return the Tavor CQ handle */
	*cq_p = (ibc_cq_hdl_t)cqhdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_cq()
 *    Free a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
	tavor_state_t	*state;
	tavor_cqhdl_t	cqhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_free_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		TNF_PROBE_0(tavor_ci_free_cq_invcqhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_cq);
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and CQ handle */
	state = (tavor_state_t *)hca;
	cqhdl = (tavor_cqhdl_t)cq;

	/* Free the CQ (tavor_cq_free() clears the handle on success) */
	status = tavor_cq_free(state, &cqhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_free_cq_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_free_cq);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_cq);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_query_cq()
 *    Return the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
    uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
{
	tavor_cqhdl_t	cqhdl;

	TAVOR_TNF_ENTER(tavor_ci_query_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		TNF_PROBE_0(tavor_ci_query_cq_invcqhdl,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_cq);
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the CQ handle */
	cqhdl = (tavor_cqhdl_t)cq;

	/* Query the current CQ size */
	*entries_p = cqhdl->cq_bufsz;

	/*
	 * Interrupt moderation is not supported, so report zeroes; the
	 * zero handler id presumably denotes the default CQ handler --
	 * NOTE(review): confirm against the IBTF contract.
	 */
	*count_p = 0;
	*usec_p = 0;
	*hid_p = 0;

	TAVOR_TNF_EXIT(tavor_ci_query_cq);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_resize_cq()
 *    Change the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
    uint_t *actual_size)
{
	tavor_state_t		*state;
	tavor_cqhdl_t		cqhdl;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_resize_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_resize_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		TNF_PROBE_0(tavor_ci_resize_cq_invcqhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and CQ handle */
	state = (tavor_state_t *)hca;
	cqhdl = (tavor_cqhdl_t)cq;

	/* Resize the CQ; the achieved size is returned in *actual_size */
	status = tavor_cq_resize(state, cqhdl, size, actual_size,
	    TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_resize_cq_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_resize_cq);
	return (IBT_SUCCESS);
}

/*
 * CQ interrupt moderation is not supported in tavor.
 */

/* ARGSUSED */
static ibt_status_t
tavor_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq,
    uint_t count, uint_t usec, ibt_cq_handler_id_t hid)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_alloc_cq_sched()
 *    Reserve a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
1295 */ 1296 /* ARGSUSED */ 1297 static ibt_status_t 1298 tavor_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_flags_t flags, 1299 ibc_cq_handler_attr_t *handler_attr_p) 1300 { 1301 TAVOR_TNF_ENTER(tavor_ci_alloc_cq_sched); 1302 1303 if (hca == NULL) { 1304 TNF_PROBE_0(tavor_ci_alloc_cq_sched_fail, 1305 TAVOR_TNF_ERROR, ""); 1306 TAVOR_TNF_EXIT(tavor_ci_alloc_cq_sched); 1307 return (IBT_HCA_HDL_INVALID); 1308 } 1309 1310 /* 1311 * This is an unsupported interface for the Tavor driver. Tavor 1312 * does not support CQ scheduling classes. 1313 */ 1314 1315 TAVOR_TNF_EXIT(tavor_ci_alloc_cq_sched); 1316 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p)) 1317 handler_attr_p->h_id = NULL; 1318 handler_attr_p->h_pri = 0; 1319 handler_attr_p->h_bind = NULL; 1320 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p)) 1321 return (IBT_SUCCESS); 1322 } 1323 1324 1325 /* 1326 * tavor_ci_free_cq_sched() 1327 * Free a CQ scheduling class resource 1328 * Context: Can be called only from user or kernel context. 1329 */ 1330 static ibt_status_t 1331 tavor_ci_free_cq_sched(ibc_hca_hdl_t hca, ibt_cq_handler_id_t handler_id) 1332 { 1333 TAVOR_TNF_ENTER(tavor_ci_free_cq_sched); 1334 1335 if (hca == NULL) { 1336 TNF_PROBE_0(tavor_ci_free_cq_sched_fail, 1337 TAVOR_TNF_ERROR, ""); 1338 TAVOR_TNF_EXIT(tavor_ci_free_cq_sched); 1339 return (IBT_HCA_HDL_INVALID); 1340 } 1341 1342 /* 1343 * This is an unsupported interface for the Tavor driver. Tavor 1344 * does not support CQ scheduling classes. Returning a NULL 1345 * hint is the way to treat this as unsupported. We check for 1346 * the expected NULL, but do not fail in any case. 
	 */
	if (handler_id != NULL) {
		TNF_PROBE_1(tavor_ci_free_cq_sched, TAVOR_TNF_TRACE, "",
		    tnf_opaque, handler_id, handler_id);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_cq_sched);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_eec()
 *    Allocate an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
    ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
{
	TAVOR_TNF_ENTER(tavor_ci_alloc_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_alloc_eec);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_free_eec()
 *    Free an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
{
	TAVOR_TNF_ENTER(tavor_ci_free_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_free_eec);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_query_eec()
 *    Query an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_eec_query_attr_t *attr_p)
{
	TAVOR_TNF_ENTER(tavor_ci_query_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.
Tavor does not support RD. 1419 */ 1420 1421 TAVOR_TNF_EXIT(tavor_ci_query_eec); 1422 return (IBT_NOT_SUPPORTED); 1423 } 1424 1425 1426 /* 1427 * tavor_ci_modify_eec() 1428 * Modify an End-to-End context 1429 * Context: Can be called from interrupt or base context. 1430 */ 1431 /* ARGSUSED */ 1432 static ibt_status_t 1433 tavor_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec, 1434 ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p) 1435 { 1436 TAVOR_TNF_ENTER(tavor_ci_query_eec); 1437 1438 /* 1439 * This is an unsupported interface for the Tavor driver. This 1440 * interface is necessary to support Reliable Datagram (RD) 1441 * operations. Tavor does not support RD. 1442 */ 1443 1444 TAVOR_TNF_EXIT(tavor_ci_query_eec); 1445 return (IBT_NOT_SUPPORTED); 1446 } 1447 1448 1449 /* 1450 * tavor_ci_register_mr() 1451 * Prepare a virtually addressed Memory Region for use by an HCA 1452 * Context: Can be called from interrupt or base context. 1453 */ 1454 /* ARGSUSED */ 1455 static ibt_status_t 1456 tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 1457 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p, 1458 ibt_mr_desc_t *mr_desc) 1459 { 1460 tavor_mr_options_t op; 1461 tavor_state_t *state; 1462 tavor_pdhdl_t pdhdl; 1463 tavor_mrhdl_t mrhdl; 1464 int status; 1465 1466 TAVOR_TNF_ENTER(tavor_ci_register_mr); 1467 1468 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc)) 1469 1470 ASSERT(mr_attr != NULL); 1471 ASSERT(mr_p != NULL); 1472 ASSERT(mr_desc != NULL); 1473 1474 /* Check for valid HCA handle */ 1475 if (hca == NULL) { 1476 TNF_PROBE_0(tavor_ci_register_mr_invhca_fail, 1477 TAVOR_TNF_ERROR, ""); 1478 TAVOR_TNF_EXIT(tavor_ci_register_mr); 1479 return (IBT_HCA_HDL_INVALID); 1480 } 1481 1482 /* Check for valid PD handle pointer */ 1483 if (pd == NULL) { 1484 TNF_PROBE_0(tavor_ci_register_mr_invpdhdl_fail, 1485 TAVOR_TNF_ERROR, ""); 1486 TAVOR_TNF_EXIT(tavor_ci_register_mr); 1487 return (IBT_PD_HDL_INVALID); 1488 } 1489 1490 /* 1491 * Validate the 
 * access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		TNF_PROBE_0(tavor_ci_register_mr_inv_accflags_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_mr);
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Register the memory region (bind type from the config profile) */
	op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_register_mr);
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Tavor MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	TAVOR_TNF_EXIT(tavor_ci_register_mr);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_register_buf()
 *    Prepare a Memory Region specified by buf structure for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	tavor_mr_options_t	op;
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	tavor_mrhdl_t		mrhdl;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	TAVOR_TNF_ENTER(tavor_ci_register_buf);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_register_buf_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_buf);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_register_buf_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_buf);
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.
Both Remote Write and Remote Atomic 1588 * require the Local Write flag to be set 1589 */ 1590 if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1591 (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) && 1592 !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) { 1593 TNF_PROBE_0(tavor_ci_register_buf_accflags_inv, 1594 TAVOR_TNF_ERROR, ""); 1595 TAVOR_TNF_EXIT(tavor_ci_register_buf); 1596 return (IBT_MR_ACCESS_REQ_INVALID); 1597 } 1598 1599 /* Grab the Tavor softstate pointer and PD handle */ 1600 state = (tavor_state_t *)hca; 1601 pdhdl = (tavor_pdhdl_t)pd; 1602 1603 /* Register the memory region */ 1604 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass; 1605 op.mro_bind_dmahdl = NULL; 1606 op.mro_bind_override_addr = 0; 1607 status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op); 1608 if (status != DDI_SUCCESS) { 1609 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "", 1610 tnf_uint, status, status); 1611 TAVOR_TNF_EXIT(tavor_ci_register_mr); 1612 return (status); 1613 } 1614 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl)) 1615 1616 /* Fill in the mr_desc structure */ 1617 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr; 1618 mr_desc->md_lkey = mrhdl->mr_lkey; 1619 /* Only set RKey if remote access was requested */ 1620 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) || 1621 (flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1622 (flags & IBT_MR_ENABLE_REMOTE_READ)) { 1623 mr_desc->md_rkey = mrhdl->mr_rkey; 1624 } 1625 1626 /* 1627 * If region is mapped for streaming (i.e. noncoherent), then set 1628 * sync is required 1629 */ 1630 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags & 1631 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE; 1632 1633 /* Return the Tavor MR handle */ 1634 *mr_p = (ibc_mr_hdl_t)mrhdl; 1635 1636 TAVOR_TNF_EXIT(tavor_ci_register_buf); 1637 return (IBT_SUCCESS); 1638 } 1639 1640 1641 /* 1642 * tavor_ci_deregister_mr() 1643 * Deregister a Memory Region from an HCA translation table 1644 * Context: Can be called only from user or kernel context. 
1645 */ 1646 static ibt_status_t 1647 tavor_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr) 1648 { 1649 tavor_state_t *state; 1650 tavor_mrhdl_t mrhdl; 1651 int status; 1652 1653 TAVOR_TNF_ENTER(tavor_ci_deregister_mr); 1654 1655 /* Check for valid HCA handle */ 1656 if (hca == NULL) { 1657 TNF_PROBE_0(tavor_ci_deregister_mr_invhca_fail, 1658 TAVOR_TNF_ERROR, ""); 1659 TAVOR_TNF_EXIT(tavor_ci_deregister_mr); 1660 return (IBT_HCA_HDL_INVALID); 1661 } 1662 1663 /* Check for valid memory region handle */ 1664 if (mr == NULL) { 1665 TNF_PROBE_0(tavor_ci_deregister_mr_invmrhdl_fail, 1666 TAVOR_TNF_ERROR, ""); 1667 TAVOR_TNF_EXIT(tavor_ci_deregister_mr); 1668 return (IBT_MR_HDL_INVALID); 1669 } 1670 1671 /* Grab the Tavor softstate pointer */ 1672 state = (tavor_state_t *)hca; 1673 mrhdl = (tavor_mrhdl_t)mr; 1674 1675 /* 1676 * Deregister the memory region. 1677 */ 1678 status = tavor_mr_deregister(state, &mrhdl, TAVOR_MR_DEREG_ALL, 1679 TAVOR_NOSLEEP); 1680 if (status != DDI_SUCCESS) { 1681 TNF_PROBE_1(tavor_ci_deregister_mr_fail, 1682 TAVOR_TNF_ERROR, "", tnf_uint, status, status); 1683 TAVOR_TNF_EXIT(tavor_ci_deregister_mr); 1684 return (status); 1685 } 1686 1687 TAVOR_TNF_EXIT(tavor_ci_deregister_mr); 1688 return (IBT_SUCCESS); 1689 } 1690 1691 1692 /* 1693 * tavor_ci_query_mr() 1694 * Retrieve information about a specified Memory Region 1695 * Context: Can be called from interrupt or base context. 
 */
static ibt_status_t
tavor_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibt_mr_query_attr_t *mr_attr)
{
	tavor_state_t	*state;
	tavor_mrhdl_t	mrhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_query_mr);

	ASSERT(mr_attr != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_mr_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_mr);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for MemRegion handle */
	if (mr == NULL) {
		TNF_PROBE_0(tavor_ci_query_mr_invmrhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_mr);
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and MR handle */
	state = (tavor_state_t *)hca;
	mrhdl = (tavor_mrhdl_t)mr;

	/* Query the memory region; attributes are filled into *mr_attr */
	status = tavor_mr_query(state, mrhdl, mr_attr);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_query_mr_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_query_mr);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_query_mr);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_register_shared_mr()
 *    Create a shared memory region matching an existing Memory Region
 *    Context: Can be called from interrupt or base context.
1747 */ 1748 /* ARGSUSED */ 1749 static ibt_status_t 1750 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, 1751 ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved, 1752 ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc) 1753 { 1754 tavor_state_t *state; 1755 tavor_pdhdl_t pdhdl; 1756 tavor_mrhdl_t mrhdl, mrhdl_new; 1757 int status; 1758 1759 TAVOR_TNF_ENTER(tavor_ci_register_shared_mr); 1760 1761 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc)) 1762 1763 ASSERT(mr_attr != NULL); 1764 ASSERT(mr_p != NULL); 1765 ASSERT(mr_desc != NULL); 1766 1767 /* Check for valid HCA handle */ 1768 if (hca == NULL) { 1769 TNF_PROBE_0(tavor_ci_register_shared_mr_invhca_fail, 1770 TAVOR_TNF_ERROR, ""); 1771 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1772 return (IBT_HCA_HDL_INVALID); 1773 } 1774 1775 /* Check for valid PD handle pointer */ 1776 if (pd == NULL) { 1777 TNF_PROBE_0(tavor_ci_register_shared_mr_invpdhdl_fail, 1778 TAVOR_TNF_ERROR, ""); 1779 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1780 return (IBT_PD_HDL_INVALID); 1781 } 1782 1783 /* Check for valid memory region handle */ 1784 if (mr == NULL) { 1785 TNF_PROBE_0(tavor_ci_register_shared_mr_invmrhdl_fail, 1786 TAVOR_TNF_ERROR, ""); 1787 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1788 return (IBT_MR_HDL_INVALID); 1789 } 1790 /* 1791 * Validate the access flags. 
Both Remote Write and Remote Atomic 1792 * require the Local Write flag to be set 1793 */ 1794 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1795 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) && 1796 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) { 1797 TNF_PROBE_0(tavor_ci_register_shared_mr_accflags_inv, 1798 TAVOR_TNF_ERROR, ""); 1799 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1800 return (IBT_MR_ACCESS_REQ_INVALID); 1801 } 1802 1803 /* Grab the Tavor softstate pointer and handles */ 1804 state = (tavor_state_t *)hca; 1805 pdhdl = (tavor_pdhdl_t)pd; 1806 mrhdl = (tavor_mrhdl_t)mr; 1807 1808 /* Register the shared memory region */ 1809 status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr, 1810 &mrhdl_new); 1811 if (status != DDI_SUCCESS) { 1812 TNF_PROBE_1(tavor_ci_register_shared_mr_fail, TAVOR_TNF_ERROR, 1813 "", tnf_uint, status, status); 1814 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1815 return (status); 1816 } 1817 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new)) 1818 1819 /* Fill in the mr_desc structure */ 1820 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr; 1821 mr_desc->md_lkey = mrhdl_new->mr_lkey; 1822 /* Only set RKey if remote access was requested */ 1823 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) || 1824 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1825 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) { 1826 mr_desc->md_rkey = mrhdl_new->mr_rkey; 1827 } 1828 1829 /* 1830 * If shared region is mapped for streaming (i.e. noncoherent), then 1831 * set sync is required 1832 */ 1833 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags & 1834 IBT_MR_NONCOHERENT) ? 
B_TRUE : B_FALSE; 1835 1836 /* Return the Tavor MR handle */ 1837 *mr_p = (ibc_mr_hdl_t)mrhdl_new; 1838 1839 TAVOR_TNF_EXIT(tavor_ci_register_mr); 1840 return (IBT_SUCCESS); 1841 } 1842 1843 1844 /* 1845 * tavor_ci_reregister_mr() 1846 * Modify the attributes of an existing Memory Region 1847 * Context: Can be called from interrupt or base context. 1848 */ 1849 /* ARGSUSED */ 1850 static ibt_status_t 1851 tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd, 1852 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new, 1853 ibt_mr_desc_t *mr_desc) 1854 { 1855 tavor_mr_options_t op; 1856 tavor_state_t *state; 1857 tavor_pdhdl_t pdhdl; 1858 tavor_mrhdl_t mrhdl, mrhdl_new; 1859 int status; 1860 1861 TAVOR_TNF_ENTER(tavor_ci_reregister_mr); 1862 1863 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc)) 1864 1865 ASSERT(mr_attr != NULL); 1866 ASSERT(mr_new != NULL); 1867 ASSERT(mr_desc != NULL); 1868 1869 /* Check for valid HCA handle */ 1870 if (hca == NULL) { 1871 TNF_PROBE_0(tavor_ci_reregister_mr_hca_inv, TAVOR_TNF_ERROR, 1872 ""); 1873 TAVOR_TNF_EXIT(tavor_ci_reregister_mr); 1874 return (IBT_HCA_HDL_INVALID); 1875 } 1876 1877 /* Check for valid memory region handle */ 1878 if (mr == NULL) { 1879 TNF_PROBE_0(tavor_ci_reregister_mr_invmrhdl_fail, 1880 TAVOR_TNF_ERROR, ""); 1881 TAVOR_TNF_EXIT(tavor_ci_reregister_mr); 1882 return (IBT_MR_HDL_INVALID); 1883 } 1884 1885 /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */ 1886 state = (tavor_state_t *)hca; 1887 mrhdl = (tavor_mrhdl_t)mr; 1888 pdhdl = (tavor_pdhdl_t)pd; 1889 1890 /* Reregister the memory region */ 1891 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass; 1892 status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr, 1893 &mrhdl_new, &op); 1894 if (status != DDI_SUCCESS) { 1895 TNF_PROBE_1(tavor_ci_reregister_mr_fail, TAVOR_TNF_ERROR, "", 1896 tnf_uint, status, status); 1897 TAVOR_TNF_EXIT(tavor_ci_reregister_mr); 1898 return (status); 1899 } 1900 
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the (new) Tavor MR handle for the reregistered region */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_reregister_buf()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
{
	tavor_mr_options_t	op;
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	tavor_mrhdl_t		mrhdl, mrhdl_new;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	TAVOR_TNF_ENTER(tavor_ci_reregister_buf);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_reregister_buf_hca_inv, TAVOR_TNF_ERROR,
		    "");
		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		TNF_PROBE_0(tavor_ci_reregister_buf_invmrhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
	state = (tavor_state_t *)hca;
	mrhdl = (tavor_mrhdl_t)mr;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Reregister the memory region (buf-based binding) */
	op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
	status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_reregister_buf_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags &
    IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the (new) Tavor MR handle for the reregistered region */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
	return (IBT_SUCCESS);
}

/*
 * tavor_ci_sync_mr()
 *    Synchronize access to a Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
{
	tavor_state_t		*state;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_sync_mr);

	ASSERT(mr_segs != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_sync_mr_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_sync_mr);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/* Sync the num_segs segments described by mr_segs */
	status = tavor_mr_sync(state, mr_segs, num_segs);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_sync_mr_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_sync_mr);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_sync_mr);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_mw()
 *    Allocate a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
{
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	tavor_mwhdl_t		mwhdl;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_mw);

	ASSERT(mw_p != NULL);
	ASSERT(rkey_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_mw_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_mw_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Allocate the memory window */
	status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_mw_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))

	/* Return the MW handle and RKey */
	*mw_p = (ibc_mw_hdl_t)mwhdl;
	*rkey_p = mwhdl->mr_rkey;

	TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_mw()
 *    Free a Memory Window
 *    Context: Can be called from interrupt or base context.
2111 */ 2112 static ibt_status_t 2113 tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw) 2114 { 2115 tavor_state_t *state; 2116 tavor_mwhdl_t mwhdl; 2117 int status; 2118 2119 TAVOR_TNF_ENTER(tavor_ci_free_mw); 2120 2121 /* Check for valid HCA handle */ 2122 if (hca == NULL) { 2123 TNF_PROBE_0(tavor_ci_free_mw_invhca_fail, 2124 TAVOR_TNF_ERROR, ""); 2125 TAVOR_TNF_EXIT(tavor_ci_free_mw); 2126 return (IBT_HCA_HDL_INVALID); 2127 } 2128 2129 /* Check for valid MW handle */ 2130 if (mw == NULL) { 2131 TNF_PROBE_0(tavor_ci_free_mw_invmwhdl_fail, 2132 TAVOR_TNF_ERROR, ""); 2133 TAVOR_TNF_EXIT(tavor_ci_free_mw); 2134 return (IBT_MW_HDL_INVALID); 2135 } 2136 2137 /* Grab the Tavor softstate pointer and MW handle */ 2138 state = (tavor_state_t *)hca; 2139 mwhdl = (tavor_mwhdl_t)mw; 2140 2141 /* Free the memory window */ 2142 status = tavor_mw_free(state, &mwhdl, TAVOR_NOSLEEP); 2143 if (status != DDI_SUCCESS) { 2144 TNF_PROBE_1(tavor_ci_free_mw_fail, TAVOR_TNF_ERROR, "", 2145 tnf_uint, status, status); 2146 TAVOR_TNF_EXIT(tavor_ci_free_mw); 2147 return (status); 2148 } 2149 2150 TAVOR_TNF_EXIT(tavor_ci_free_mw); 2151 return (IBT_SUCCESS); 2152 } 2153 2154 2155 /* 2156 * tavor_ci_query_mw() 2157 * Return the attributes of the specified Memory Window 2158 * Context: Can be called from interrupt or base context. 
2159 */ 2160 static ibt_status_t 2161 tavor_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw, 2162 ibt_mw_query_attr_t *mw_attr_p) 2163 { 2164 tavor_mwhdl_t mwhdl; 2165 2166 TAVOR_TNF_ENTER(tavor_ci_query_mw); 2167 2168 ASSERT(mw_attr_p != NULL); 2169 2170 /* Check for valid HCA handle */ 2171 if (hca == NULL) { 2172 TNF_PROBE_0(tavor_ci_query_mw_invhca_fail, 2173 TAVOR_TNF_ERROR, ""); 2174 TAVOR_TNF_EXIT(tavor_ci_query_mw); 2175 return (IBT_HCA_HDL_INVALID); 2176 } 2177 2178 /* Check for valid MemWin handle */ 2179 if (mw == NULL) { 2180 TNF_PROBE_0(tavor_ci_query_mw_inc_mwhdl_fail, 2181 TAVOR_TNF_ERROR, ""); 2182 TAVOR_TNF_EXIT(tavor_ci_query_mw); 2183 return (IBT_MW_HDL_INVALID); 2184 } 2185 2186 /* Query the memory window pointer and fill in the return values */ 2187 mwhdl = (tavor_mwhdl_t)mw; 2188 mutex_enter(&mwhdl->mr_lock); 2189 mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl; 2190 mw_attr_p->mw_rkey = mwhdl->mr_rkey; 2191 mutex_exit(&mwhdl->mr_lock); 2192 2193 TAVOR_TNF_EXIT(tavor_ci_query_mw); 2194 return (IBT_SUCCESS); 2195 } 2196 2197 2198 /* 2199 * tavor_ci_attach_mcg() 2200 * Attach a Queue Pair to a Multicast Group 2201 * Context: Can be called only from user or kernel context. 
2202 */ 2203 static ibt_status_t 2204 tavor_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid, 2205 ib_lid_t lid) 2206 { 2207 tavor_state_t *state; 2208 tavor_qphdl_t qphdl; 2209 int status; 2210 2211 TAVOR_TNF_ENTER(tavor_ci_attach_mcg); 2212 2213 /* Check for valid HCA handle */ 2214 if (hca == NULL) { 2215 TNF_PROBE_0(tavor_ci_attach_mcg_invhca_fail, 2216 TAVOR_TNF_ERROR, ""); 2217 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2218 return (IBT_HCA_HDL_INVALID); 2219 } 2220 2221 /* Check for valid QP handle pointer */ 2222 if (qp == NULL) { 2223 TNF_PROBE_0(tavor_ci_attach_mcg_invqphdl_fail, 2224 TAVOR_TNF_ERROR, ""); 2225 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2226 return (IBT_QP_HDL_INVALID); 2227 } 2228 2229 /* Grab the Tavor softstate pointer and QP handles */ 2230 state = (tavor_state_t *)hca; 2231 qphdl = (tavor_qphdl_t)qp; 2232 2233 /* Attach the QP to the multicast group */ 2234 status = tavor_mcg_attach(state, qphdl, gid, lid); 2235 if (status != DDI_SUCCESS) { 2236 TNF_PROBE_1(tavor_ci_attach_mcg_fail, TAVOR_TNF_ERROR, "", 2237 tnf_uint, status, status); 2238 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2239 return (status); 2240 } 2241 2242 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2243 return (IBT_SUCCESS); 2244 } 2245 2246 2247 /* 2248 * tavor_ci_detach_mcg() 2249 * Detach a Queue Pair to a Multicast Group 2250 * Context: Can be called only from user or kernel context. 
2251 */ 2252 static ibt_status_t 2253 tavor_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid, 2254 ib_lid_t lid) 2255 { 2256 tavor_state_t *state; 2257 tavor_qphdl_t qphdl; 2258 int status; 2259 2260 TAVOR_TNF_ENTER(tavor_ci_attach_mcg); 2261 2262 /* Check for valid HCA handle */ 2263 if (hca == NULL) { 2264 TNF_PROBE_0(tavor_ci_detach_mcg_invhca_fail, 2265 TAVOR_TNF_ERROR, ""); 2266 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2267 return (IBT_HCA_HDL_INVALID); 2268 } 2269 2270 /* Check for valid QP handle pointer */ 2271 if (qp == NULL) { 2272 TNF_PROBE_0(tavor_ci_detach_mcg_invqphdl_fail, 2273 TAVOR_TNF_ERROR, ""); 2274 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2275 return (IBT_QP_HDL_INVALID); 2276 } 2277 2278 /* Grab the Tavor softstate pointer and QP handle */ 2279 state = (tavor_state_t *)hca; 2280 qphdl = (tavor_qphdl_t)qp; 2281 2282 /* Detach the QP from the multicast group */ 2283 status = tavor_mcg_detach(state, qphdl, gid, lid); 2284 if (status != DDI_SUCCESS) { 2285 TNF_PROBE_1(tavor_ci_detach_mcg_fail, TAVOR_TNF_ERROR, "", 2286 tnf_uint, status, status); 2287 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2288 return (status); 2289 } 2290 2291 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2292 return (IBT_SUCCESS); 2293 } 2294 2295 2296 /* 2297 * tavor_ci_post_send() 2298 * Post send work requests to the send queue on the specified QP 2299 * Context: Can be called from interrupt or base context. 
2300 */ 2301 static ibt_status_t 2302 tavor_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p, 2303 uint_t num_wr, uint_t *num_posted_p) 2304 { 2305 tavor_state_t *state; 2306 tavor_qphdl_t qphdl; 2307 int status; 2308 2309 TAVOR_TNF_ENTER(tavor_ci_post_send); 2310 2311 ASSERT(wr_p != NULL); 2312 ASSERT(num_wr != 0); 2313 2314 /* Check for valid HCA handle */ 2315 if (hca == NULL) { 2316 TNF_PROBE_0(tavor_ci_post_send_invhca_fail, 2317 TAVOR_TNF_ERROR, ""); 2318 TAVOR_TNF_EXIT(tavor_ci_post_send); 2319 return (IBT_HCA_HDL_INVALID); 2320 } 2321 2322 /* Check for valid QP handle pointer */ 2323 if (qp == NULL) { 2324 TNF_PROBE_0(tavor_ci_post_send_invqphdl_fail, 2325 TAVOR_TNF_ERROR, ""); 2326 TAVOR_TNF_EXIT(tavor_ci_post_send); 2327 return (IBT_QP_HDL_INVALID); 2328 } 2329 2330 /* Grab the Tavor softstate pointer and QP handle */ 2331 state = (tavor_state_t *)hca; 2332 qphdl = (tavor_qphdl_t)qp; 2333 2334 /* Post the send WQEs */ 2335 status = tavor_post_send(state, qphdl, wr_p, num_wr, num_posted_p); 2336 if (status != DDI_SUCCESS) { 2337 TNF_PROBE_1(tavor_ci_post_send_fail, TAVOR_TNF_ERROR, "", 2338 tnf_uint, status, status); 2339 TAVOR_TNF_EXIT(tavor_ci_post_send); 2340 return (status); 2341 } 2342 2343 TAVOR_TNF_EXIT(tavor_ci_post_send); 2344 return (IBT_SUCCESS); 2345 } 2346 2347 2348 /* 2349 * tavor_ci_post_recv() 2350 * Post receive work requests to the receive queue on the specified QP 2351 * Context: Can be called from interrupt or base context. 
2352 */ 2353 static ibt_status_t 2354 tavor_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p, 2355 uint_t num_wr, uint_t *num_posted_p) 2356 { 2357 tavor_state_t *state; 2358 tavor_qphdl_t qphdl; 2359 int status; 2360 2361 TAVOR_TNF_ENTER(tavor_ci_post_recv); 2362 2363 ASSERT(wr_p != NULL); 2364 ASSERT(num_wr != 0); 2365 2366 /* Check for valid HCA handle */ 2367 if (hca == NULL) { 2368 TNF_PROBE_0(tavor_ci_post_recv_invhca_fail, 2369 TAVOR_TNF_ERROR, ""); 2370 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2371 return (IBT_HCA_HDL_INVALID); 2372 } 2373 2374 /* Check for valid QP handle pointer */ 2375 if (qp == NULL) { 2376 TNF_PROBE_0(tavor_ci_post_recv_invqphdl_fail, 2377 TAVOR_TNF_ERROR, ""); 2378 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2379 return (IBT_QP_HDL_INVALID); 2380 } 2381 2382 /* Grab the Tavor softstate pointer and QP handle */ 2383 state = (tavor_state_t *)hca; 2384 qphdl = (tavor_qphdl_t)qp; 2385 2386 /* Post the receive WQEs */ 2387 status = tavor_post_recv(state, qphdl, wr_p, num_wr, num_posted_p); 2388 if (status != DDI_SUCCESS) { 2389 TNF_PROBE_1(tavor_ci_post_recv_fail, TAVOR_TNF_ERROR, "", 2390 tnf_uint, status, status); 2391 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2392 return (status); 2393 } 2394 2395 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2396 return (IBT_SUCCESS); 2397 } 2398 2399 2400 /* 2401 * tavor_ci_poll_cq() 2402 * Poll for a work request completion 2403 * Context: Can be called from interrupt or base context. 
2404 */ 2405 static ibt_status_t 2406 tavor_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p, 2407 uint_t num_wc, uint_t *num_polled) 2408 { 2409 tavor_state_t *state; 2410 tavor_cqhdl_t cqhdl; 2411 uint_t polled; 2412 int status; 2413 2414 TAVOR_TNF_ENTER(tavor_ci_poll_cq); 2415 2416 ASSERT(wc_p != NULL); 2417 2418 /* Check for valid HCA handle */ 2419 if (hca == NULL) { 2420 TNF_PROBE_0(tavor_ci_poll_cq_invhca_fail, 2421 TAVOR_TNF_ERROR, ""); 2422 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2423 return (IBT_HCA_HDL_INVALID); 2424 } 2425 2426 /* Check for valid CQ handle pointer */ 2427 if (cq == NULL) { 2428 TNF_PROBE_0(tavor_ci_poll_cq_invcqhdl_fail, 2429 TAVOR_TNF_ERROR, ""); 2430 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2431 return (IBT_CQ_HDL_INVALID); 2432 } 2433 2434 /* Check for valid num_wc field */ 2435 if (num_wc == 0) { 2436 TNF_PROBE_0(tavor_ci_poll_cq_num_wc_fail, 2437 TAVOR_TNF_ERROR, ""); 2438 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2439 return (IBT_INVALID_PARAM); 2440 } 2441 2442 /* Grab the Tavor softstate pointer and CQ handle */ 2443 state = (tavor_state_t *)hca; 2444 cqhdl = (tavor_cqhdl_t)cq; 2445 2446 /* Poll for work request completions */ 2447 status = tavor_cq_poll(state, cqhdl, wc_p, num_wc, &polled); 2448 2449 /* First fill in "num_polled" argument (only when valid) */ 2450 if (num_polled) { 2451 *num_polled = polled; 2452 } 2453 2454 /* 2455 * Check the status code; 2456 * If empty, we return empty. 
2457 * If error, we print out an error and then return 2458 * If success (something was polled), we return success 2459 */ 2460 if (status != DDI_SUCCESS) { 2461 if (status != IBT_CQ_EMPTY) { 2462 TNF_PROBE_1(tavor_ci_poll_cq_fail, TAVOR_TNF_ERROR, "", 2463 tnf_uint, status, status); 2464 } 2465 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2466 return (status); 2467 } 2468 2469 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2470 return (IBT_SUCCESS); 2471 } 2472 2473 2474 /* 2475 * tavor_ci_notify_cq() 2476 * Enable notification events on the specified CQ 2477 * Context: Can be called from interrupt or base context. 2478 */ 2479 static ibt_status_t 2480 tavor_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl, 2481 ibt_cq_notify_flags_t flags) 2482 { 2483 tavor_state_t *state; 2484 tavor_cqhdl_t cqhdl; 2485 int status; 2486 2487 TAVOR_TNF_ENTER(tavor_ci_notify_cq); 2488 2489 /* Check for valid HCA handle */ 2490 if (hca == NULL) { 2491 TNF_PROBE_0(tavor_ci_notify_cq_invhca_fail, 2492 TAVOR_TNF_ERROR, ""); 2493 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2494 return (IBT_HCA_HDL_INVALID); 2495 } 2496 2497 /* Check for valid CQ handle pointer */ 2498 if (cq_hdl == NULL) { 2499 TNF_PROBE_0(tavor_ci_notify_cq_invcqhdl_fail, 2500 TAVOR_TNF_ERROR, ""); 2501 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2502 return (IBT_CQ_HDL_INVALID); 2503 } 2504 2505 /* Grab the Tavor softstate pointer and CQ handle */ 2506 state = (tavor_state_t *)hca; 2507 cqhdl = (tavor_cqhdl_t)cq_hdl; 2508 2509 /* Enable the CQ notification */ 2510 status = tavor_cq_notify(state, cqhdl, flags); 2511 if (status != DDI_SUCCESS) { 2512 TNF_PROBE_1(tavor_ci_notify_cq_fail, TAVOR_TNF_ERROR, "", 2513 tnf_uint, status, status); 2514 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2515 return (status); 2516 } 2517 2518 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2519 return (IBT_SUCCESS); 2520 } 2521 2522 /* 2523 * tavor_ci_ci_data_in() 2524 * Exchange CI-specific data. 2525 * Context: Can be called only from user or kernel context. 
2526 */ 2527 static ibt_status_t 2528 tavor_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags, 2529 ibt_object_type_t object, void *ibc_object_handle, void *data_p, 2530 size_t data_sz) 2531 { 2532 tavor_state_t *state; 2533 int status; 2534 2535 TAVOR_TNF_ENTER(tavor_ci_ci_data_in); 2536 2537 /* Check for valid HCA handle */ 2538 if (hca == NULL) { 2539 TNF_PROBE_0(tavor_ci_ci_data_in_invhca_fail, 2540 TAVOR_TNF_ERROR, ""); 2541 TAVOR_TNF_EXIT(tavor_ci_ci_data_in); 2542 return (IBT_HCA_HDL_INVALID); 2543 } 2544 2545 /* Grab the Tavor softstate pointer */ 2546 state = (tavor_state_t *)hca; 2547 2548 /* Get the Tavor userland mapping information */ 2549 status = tavor_umap_ci_data_in(state, flags, object, 2550 ibc_object_handle, data_p, data_sz); 2551 if (status != DDI_SUCCESS) { 2552 TNF_PROBE_1(tavor_ci_ci_data_in_umap_fail, TAVOR_TNF_ERROR, 2553 "", tnf_uint, status, status); 2554 TAVOR_TNF_EXIT(tavor_ci_ci_data_in); 2555 return (status); 2556 } 2557 2558 TAVOR_TNF_EXIT(tavor_ci_ci_data_in); 2559 return (IBT_SUCCESS); 2560 } 2561 2562 /* 2563 * tavor_ci_ci_data_out() 2564 * Exchange CI-specific data. 2565 * Context: Can be called only from user or kernel context. 
2566 */ 2567 static ibt_status_t 2568 tavor_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags, 2569 ibt_object_type_t object, void *ibc_object_handle, void *data_p, 2570 size_t data_sz) 2571 { 2572 tavor_state_t *state; 2573 int status; 2574 2575 TAVOR_TNF_ENTER(tavor_ci_ci_data_out); 2576 2577 /* Check for valid HCA handle */ 2578 if (hca == NULL) { 2579 TNF_PROBE_0(tavor_ci_ci_data_out_invhca_fail, 2580 TAVOR_TNF_ERROR, ""); 2581 TAVOR_TNF_EXIT(tavor_ci_ci_data_out); 2582 return (IBT_HCA_HDL_INVALID); 2583 } 2584 2585 /* Grab the Tavor softstate pointer */ 2586 state = (tavor_state_t *)hca; 2587 2588 /* Get the Tavor userland mapping information */ 2589 status = tavor_umap_ci_data_out(state, flags, object, 2590 ibc_object_handle, data_p, data_sz); 2591 if (status != DDI_SUCCESS) { 2592 TNF_PROBE_1(tavor_ci_ci_data_out_umap_fail, TAVOR_TNF_ERROR, 2593 "", tnf_uint, status, status); 2594 TAVOR_TNF_EXIT(tavor_ci_ci_data_out); 2595 return (status); 2596 } 2597 2598 TAVOR_TNF_EXIT(tavor_ci_ci_data_out); 2599 return (IBT_SUCCESS); 2600 } 2601 2602 2603 /* 2604 * tavor_ci_alloc_srq() 2605 * Allocate a Shared Receive Queue (SRQ) 2606 * Context: Can be called only from user or kernel context 2607 */ 2608 static ibt_status_t 2609 tavor_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags, 2610 ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes, 2611 ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p) 2612 { 2613 tavor_state_t *state; 2614 tavor_pdhdl_t pdhdl; 2615 tavor_srqhdl_t srqhdl; 2616 tavor_srq_info_t srqinfo; 2617 tavor_srq_options_t op; 2618 int status; 2619 2620 TAVOR_TNF_ENTER(tavor_ci_alloc_srq); 2621 2622 /* Check for valid HCA handle */ 2623 if (hca == NULL) { 2624 TNF_PROBE_0(tavor_ci_alloc_srq_invhca_fail, 2625 TAVOR_TNF_ERROR, ""); 2626 TAVOR_TNF_EXIT(tavor_alloc_srq); 2627 return (IBT_HCA_HDL_INVALID); 2628 } 2629 2630 state = (tavor_state_t *)hca; 2631 2632 /* Check if SRQ is even supported */ 2633 if 
(state->ts_cfg_profile->cp_srq_enable == 0) { 2634 TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail, 2635 TAVOR_TNF_ERROR, ""); 2636 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2637 return (IBT_NOT_SUPPORTED); 2638 } 2639 2640 /* Check for valid PD handle pointer */ 2641 if (pd == NULL) { 2642 TNF_PROBE_0(tavor_ci_alloc_srq_invpdhdl_fail, 2643 TAVOR_TNF_ERROR, ""); 2644 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2645 return (IBT_PD_HDL_INVALID); 2646 } 2647 2648 pdhdl = (tavor_pdhdl_t)pd; 2649 2650 srqinfo.srqi_ibt_srqhdl = ibt_srq; 2651 srqinfo.srqi_pd = pdhdl; 2652 srqinfo.srqi_sizes = sizes; 2653 srqinfo.srqi_real_sizes = ret_sizes_p; 2654 srqinfo.srqi_srqhdl = &srqhdl; 2655 srqinfo.srqi_flags = flags; 2656 op.srqo_wq_loc = state->ts_cfg_profile->cp_srq_wq_inddr; 2657 status = tavor_srq_alloc(state, &srqinfo, TAVOR_NOSLEEP, &op); 2658 if (status != DDI_SUCCESS) { 2659 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2660 return (status); 2661 } 2662 2663 *ibc_srq_p = (ibc_srq_hdl_t)srqhdl; 2664 2665 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2666 return (IBT_SUCCESS); 2667 } 2668 2669 /* 2670 * tavor_ci_free_srq() 2671 * Free a Shared Receive Queue (SRQ) 2672 * Context: Can be called only from user or kernel context 2673 */ 2674 static ibt_status_t 2675 tavor_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq) 2676 { 2677 tavor_state_t *state; 2678 tavor_srqhdl_t srqhdl; 2679 int status; 2680 2681 TAVOR_TNF_ENTER(tavor_ci_free_srq); 2682 2683 /* Check for valid HCA handle */ 2684 if (hca == NULL) { 2685 TNF_PROBE_0(tavor_ci_free_srq_invhca_fail, 2686 TAVOR_TNF_ERROR, ""); 2687 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2688 return (IBT_HCA_HDL_INVALID); 2689 } 2690 2691 state = (tavor_state_t *)hca; 2692 2693 /* Check if SRQ is even supported */ 2694 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2695 TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail, 2696 TAVOR_TNF_ERROR, ""); 2697 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2698 return (IBT_NOT_SUPPORTED); 2699 } 2700 2701 /* Check for valid SRQ handle pointer 
*/ 2702 if (srq == NULL) { 2703 TNF_PROBE_0(tavor_ci_free_srq_invsrqhdl_fail, 2704 TAVOR_TNF_ERROR, ""); 2705 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2706 return (IBT_SRQ_HDL_INVALID); 2707 } 2708 2709 srqhdl = (tavor_srqhdl_t)srq; 2710 2711 /* Free the SRQ */ 2712 status = tavor_srq_free(state, &srqhdl, TAVOR_NOSLEEP); 2713 if (status != DDI_SUCCESS) { 2714 TNF_PROBE_1(tavor_ci_free_srq_fail, TAVOR_TNF_ERROR, "", 2715 tnf_uint, status, status); 2716 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2717 return (status); 2718 } 2719 2720 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2721 return (IBT_SUCCESS); 2722 } 2723 2724 /* 2725 * tavor_ci_query_srq() 2726 * Query properties of a Shared Receive Queue (SRQ) 2727 * Context: Can be called from interrupt or base context. 2728 */ 2729 static ibt_status_t 2730 tavor_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p, 2731 ibt_srq_sizes_t *sizes_p, uint_t *limit_p) 2732 { 2733 tavor_state_t *state; 2734 tavor_srqhdl_t srqhdl; 2735 2736 TAVOR_TNF_ENTER(tavor_ci_query_srq); 2737 2738 /* Check for valid HCA handle */ 2739 if (hca == NULL) { 2740 TNF_PROBE_0(tavor_ci_query_srq_invhca_fail, 2741 TAVOR_TNF_ERROR, ""); 2742 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2743 return (IBT_HCA_HDL_INVALID); 2744 } 2745 2746 state = (tavor_state_t *)hca; 2747 2748 /* Check if SRQ is even supported */ 2749 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2750 TNF_PROBE_0(tavor_ci_query_srq_not_supported_fail, 2751 TAVOR_TNF_ERROR, ""); 2752 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2753 return (IBT_NOT_SUPPORTED); 2754 } 2755 2756 /* Check for valid SRQ handle pointer */ 2757 if (srq == NULL) { 2758 TNF_PROBE_0(tavor_ci_query_srq_invsrqhdl_fail, 2759 TAVOR_TNF_ERROR, ""); 2760 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2761 return (IBT_SRQ_HDL_INVALID); 2762 } 2763 2764 srqhdl = (tavor_srqhdl_t)srq; 2765 2766 mutex_enter(&srqhdl->srq_lock); 2767 if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) { 2768 mutex_exit(&srqhdl->srq_lock); 2769 
TNF_PROBE_0(tavor_ci_query_srq_error_state, 2770 TAVOR_TNF_ERROR, ""); 2771 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2772 return (IBT_SRQ_ERROR_STATE); 2773 } 2774 2775 *pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl; 2776 sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz; 2777 sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz; 2778 mutex_exit(&srqhdl->srq_lock); 2779 *limit_p = 0; 2780 2781 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2782 return (IBT_SUCCESS); 2783 } 2784 2785 /* 2786 * tavor_ci_modify_srq() 2787 * Modify properties of a Shared Receive Queue (SRQ) 2788 * Context: Can be called from interrupt or base context. 2789 */ 2790 /* ARGSUSED */ 2791 static ibt_status_t 2792 tavor_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, 2793 ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p) 2794 { 2795 tavor_state_t *state; 2796 tavor_srqhdl_t srqhdl; 2797 uint_t resize_supported, cur_srq_size; 2798 int status; 2799 2800 TAVOR_TNF_ENTER(tavor_ci_modify_srq); 2801 2802 /* Check for valid HCA handle */ 2803 if (hca == NULL) { 2804 TNF_PROBE_0(tavor_ci_modify_srq_invhca_fail, 2805 TAVOR_TNF_ERROR, ""); 2806 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2807 return (IBT_HCA_HDL_INVALID); 2808 } 2809 2810 state = (tavor_state_t *)hca; 2811 2812 /* Check if SRQ is even supported */ 2813 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2814 TNF_PROBE_0(tavor_ci_modify_srq_not_supported_fail, 2815 TAVOR_TNF_ERROR, ""); 2816 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2817 return (IBT_NOT_SUPPORTED); 2818 } 2819 2820 /* Check for valid SRQ handle pointer */ 2821 if (srq == NULL) { 2822 TNF_PROBE_0(tavor_ci_modify_srq_invcqhdl_fail, 2823 TAVOR_TNF_ERROR, ""); 2824 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2825 return (IBT_SRQ_HDL_INVALID); 2826 } 2827 2828 srqhdl = (tavor_srqhdl_t)srq; 2829 2830 /* 2831 * Check Error State of SRQ. 2832 * Also, while we are holding the lock we save away the current SRQ 2833 * size for later use. 
2834 */ 2835 mutex_enter(&srqhdl->srq_lock); 2836 cur_srq_size = srqhdl->srq_wq_bufsz; 2837 if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) { 2838 mutex_exit(&srqhdl->srq_lock); 2839 TNF_PROBE_0(tavor_ci_modify_srq_error_state, 2840 TAVOR_TNF_ERROR, ""); 2841 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2842 return (IBT_SRQ_ERROR_STATE); 2843 } 2844 mutex_exit(&srqhdl->srq_lock); 2845 2846 /* 2847 * Setting the limit watermark is not currently supported. This is a 2848 * tavor hardware (firmware) limitation. We return NOT_SUPPORTED here, 2849 * and have the limit code commented out for now. 2850 * 2851 * XXX If we enable the limit watermark support, we need to do checks 2852 * and set the 'srq->srq_wr_limit' here, instead of returning not 2853 * supported. The 'tavor_srq_modify' operation below is for resizing 2854 * the SRQ only, the limit work should be done here. If this is 2855 * changed to use the 'limit' field, the 'ARGSUSED' comment for this 2856 * function should also be removed at that time. 2857 */ 2858 if (flags & IBT_SRQ_SET_LIMIT) { 2859 TNF_PROBE_0(tavor_ci_modify_srq_limit_not_supported, 2860 TAVOR_TNF_ERROR, ""); 2861 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2862 return (IBT_NOT_SUPPORTED); 2863 } 2864 2865 /* 2866 * Check the SET_SIZE flag. If not set, we simply return success here. 2867 * However if it is set, we check if resize is supported and only then 2868 * do we continue on with our resize processing. 2869 */ 2870 if (!(flags & IBT_SRQ_SET_SIZE)) { 2871 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2872 return (IBT_SUCCESS); 2873 } 2874 2875 resize_supported = state->ts_ibtfinfo.hca_attr->hca_flags & 2876 IBT_HCA_RESIZE_SRQ; 2877 2878 if ((flags & IBT_SRQ_SET_SIZE) && !resize_supported) { 2879 TNF_PROBE_0(tavor_ci_modify_srq_resize_not_supp_fail, 2880 TAVOR_TNF_ERROR, ""); 2881 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2882 return (IBT_NOT_SUPPORTED); 2883 } 2884 2885 /* 2886 * We do not support resizing an SRQ to be smaller than it's current 2887 * size. 
If a smaller (or equal) size is requested, then we simply 2888 * return success, and do nothing. 2889 */ 2890 if (size <= cur_srq_size) { 2891 *ret_size_p = cur_srq_size; 2892 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2893 return (IBT_SUCCESS); 2894 } 2895 2896 status = tavor_srq_modify(state, srqhdl, size, ret_size_p, 2897 TAVOR_NOSLEEP); 2898 if (status != DDI_SUCCESS) { 2899 /* Set return value to current SRQ size */ 2900 *ret_size_p = cur_srq_size; 2901 TNF_PROBE_1(tavor_ci_modify_srq_fail, TAVOR_TNF_ERROR, "", 2902 tnf_uint, status, status); 2903 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2904 return (status); 2905 } 2906 2907 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2908 return (IBT_SUCCESS); 2909 } 2910 2911 /* 2912 * tavor_ci_post_srq() 2913 * Post a Work Request to the specified Shared Receive Queue (SRQ) 2914 * Context: Can be called from interrupt or base context. 2915 */ 2916 static ibt_status_t 2917 tavor_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, 2918 ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p) 2919 { 2920 tavor_state_t *state; 2921 tavor_srqhdl_t srqhdl; 2922 int status; 2923 2924 TAVOR_TNF_ENTER(tavor_ci_post_srq); 2925 2926 /* Check for valid HCA handle */ 2927 if (hca == NULL) { 2928 TNF_PROBE_0(tavor_ci_post_srq_invhca_fail, 2929 TAVOR_TNF_ERROR, ""); 2930 TAVOR_TNF_EXIT(tavor_ci_post_srq); 2931 return (IBT_HCA_HDL_INVALID); 2932 } 2933 2934 state = (tavor_state_t *)hca; 2935 2936 /* Check if SRQ is even supported */ 2937 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2938 TNF_PROBE_0(tavor_ci_post_srq_not_supported_fail, 2939 TAVOR_TNF_ERROR, ""); 2940 TAVOR_TNF_EXIT(tavor_ci_post_srq); 2941 return (IBT_NOT_SUPPORTED); 2942 } 2943 2944 /* Check for valid SRQ handle pointer */ 2945 if (srq == NULL) { 2946 TNF_PROBE_0(tavor_ci_post_srq_invsrqhdl_fail, 2947 TAVOR_TNF_ERROR, ""); 2948 TAVOR_TNF_EXIT(tavor_ci_post_srq); 2949 return (IBT_SRQ_HDL_INVALID); 2950 } 2951 2952 srqhdl = (tavor_srqhdl_t)srq; 2953 2954 status = tavor_post_srq(state, 
srqhdl, wr, num_wr, num_posted_p); 2955 if (status != DDI_SUCCESS) { 2956 TNF_PROBE_1(tavor_ci_post_srq_fail, TAVOR_TNF_ERROR, "", 2957 tnf_uint, status, status); 2958 TAVOR_TNF_EXIT(tavor_ci_post_srq); 2959 return (status); 2960 } 2961 2962 TAVOR_TNF_EXIT(tavor_ci_post_srq); 2963 return (IBT_SUCCESS); 2964 } 2965 2966 /* Address translation */ 2967 /* 2968 * tavor_ci_map_mem_area() 2969 * Context: Can be called from interrupt or base context. 2970 */ 2971 /* ARGSUSED */ 2972 static ibt_status_t 2973 tavor_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs, 2974 void *ibtl_reserved, uint_t list_len, ibt_phys_buf_t *paddr_list_p, 2975 uint_t *ret_num_paddr_p, size_t *paddr_buf_sz_p, 2976 ib_memlen_t *paddr_offset_p, ibc_ma_hdl_t *ibc_ma_hdl_p) 2977 { 2978 tavor_state_t *state; 2979 uint_t cookiecnt; 2980 int status; 2981 2982 TAVOR_TNF_ENTER(tavor_ci_map_mem_area); 2983 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*paddr_list_p)) 2984 2985 /* Check for valid HCA handle */ 2986 if (hca == NULL) { 2987 TNF_PROBE_0(tavor_ci_map_mem_area_fail, 2988 TAVOR_TNF_ERROR, ""); 2989 TAVOR_TNF_EXIT(tavor_ci_map_mem_area); 2990 return (IBT_HCA_HDL_INVALID); 2991 } 2992 2993 if ((va_attrs->va_flags & IBT_VA_BUF) && (va_attrs->va_buf == NULL)) { 2994 TNF_PROBE_0(tavor_ci_map_mem_area_fail, 2995 TAVOR_TNF_ERROR, ""); 2996 TAVOR_TNF_EXIT(tavor_ci_map_mem_area); 2997 return (IBT_INVALID_PARAM); 2998 } 2999 3000 state = (tavor_state_t *)hca; 3001 3002 /* 3003 * Based on the length of the buffer and the paddr_list passed in, 3004 * retrieve DMA cookies for the virtual to physical address 3005 * translation. 
3006 */ 3007 status = tavor_get_dma_cookies(state, paddr_list_p, va_attrs, 3008 list_len, &cookiecnt, ibc_ma_hdl_p); 3009 if (status != DDI_SUCCESS) { 3010 TNF_PROBE_1(tavor_ci_map_mem_area, TAVOR_TNF_ERROR, "", 3011 tnf_uint, status, status); 3012 TAVOR_TNF_EXIT(tavor_ci_map_mem_area); 3013 return (status); 3014 } 3015 3016 /* 3017 * Split the cookies returned from 'tavor_get_dma_cookies() above. We 3018 * also pass in the size of the cookies we would like. 3019 * Note: for now, we only support PAGESIZE cookies. 3020 */ 3021 status = tavor_split_dma_cookies(state, paddr_list_p, paddr_offset_p, 3022 list_len, &cookiecnt, PAGESIZE); 3023 if (status != DDI_SUCCESS) { 3024 TNF_PROBE_1(tavor_ci_map_mem_area, TAVOR_TNF_ERROR, "", 3025 tnf_uint, status, status); 3026 TAVOR_TNF_EXIT(tavor_ci_map_mem_area); 3027 return (status); 3028 } 3029 3030 /* Setup return values */ 3031 *ret_num_paddr_p = cookiecnt; 3032 *paddr_buf_sz_p = PAGESIZE; 3033 3034 TAVOR_TNF_EXIT(tavor_ci_map_mem_area); 3035 return (IBT_SUCCESS); 3036 } 3037 3038 /* 3039 * tavor_ci_unmap_mem_area() 3040 * Unmap the memory area 3041 * Context: Can be called from interrupt or base context. 
3042 */ 3043 /* ARGSUSED */ 3044 static ibt_status_t 3045 tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl) 3046 { 3047 int status; 3048 3049 TAVOR_TNF_ENTER(tavor_ci_unmap_mem_area); 3050 3051 if (ma_hdl == NULL) { 3052 TNF_PROBE_0(tavor_ci_unmap_mem_area_invalid_mahdl_fail, 3053 TAVOR_TNF_ERROR, ""); 3054 TAVOR_TNF_EXIT(tavor_ci_unmap_mem_area); 3055 return (IBT_MA_HDL_INVALID); 3056 } 3057 3058 status = tavor_free_dma_cookies(ma_hdl); 3059 if (status != DDI_SUCCESS) { 3060 TNF_PROBE_0(tavor_ci_unmap_mem_area_free_dma_fail, 3061 TAVOR_TNF_ERROR, ""); 3062 TAVOR_TNF_EXIT(tavor_ci_unmap_mem_area); 3063 return (ibc_get_ci_failure(0)); 3064 } 3065 3066 TAVOR_TNF_EXIT(tavor_ci_unmap_mem_area); 3067 return (IBT_SUCCESS); 3068 } 3069 3070 /* ARGSUSED */ 3071 static ibt_status_t 3072 tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov, 3073 ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p) 3074 { 3075 return (IBT_NOT_SUPPORTED); 3076 } 3077 3078 /* ARGSUSED */ 3079 static ibt_status_t 3080 tavor_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl) 3081 { 3082 return (IBT_NOT_SUPPORTED); 3083 } 3084 3085 /* Allocate L_Key */ 3086 /* 3087 * tavor_ci_alloc_lkey() 3088 */ 3089 /* ARGSUSED */ 3090 static ibt_status_t 3091 tavor_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 3092 ibt_lkey_flags_t flags, uint_t phys_buf_list_sz, ibc_mr_hdl_t *mr_p, 3093 ibt_pmr_desc_t *mem_desc_p) 3094 { 3095 TAVOR_TNF_ENTER(tavor_ci_alloc_lkey); 3096 TAVOR_TNF_EXIT(tavor_ci_alloc_lkey); 3097 return (IBT_NOT_SUPPORTED); 3098 } 3099 3100 /* Physical Register Memory Region */ 3101 /* 3102 * tavor_ci_register_physical_mr() 3103 */ 3104 /* ARGSUSED */ 3105 static ibt_status_t 3106 tavor_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 3107 ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p, 3108 ibt_pmr_desc_t *mem_desc_p) 3109 { 3110 TAVOR_TNF_ENTER(tavor_ci_register_physical_mr); 3111 TAVOR_TNF_EXIT(tavor_ci_register_physical_mr); 3112 return 
(IBT_NOT_SUPPORTED); 3113 } 3114 3115 /* 3116 * tavor_ci_reregister_physical_mr() 3117 */ 3118 /* ARGSUSED */ 3119 static ibt_status_t 3120 tavor_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, 3121 ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, 3122 ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p) 3123 { 3124 TAVOR_TNF_ENTER(tavor_ci_reregister_physical_mr); 3125 TAVOR_TNF_EXIT(tavor_ci_reregister_physical_mr); 3126 return (IBT_NOT_SUPPORTED); 3127 } 3128 3129 /* Mellanox FMR Support */ 3130 /* 3131 * tavor_ci_create_fmr_pool() 3132 * Creates a pool of memory regions suitable for FMR registration 3133 * Context: Can be called from base context only 3134 */ 3135 static ibt_status_t 3136 tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 3137 ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p) 3138 { 3139 tavor_state_t *state; 3140 tavor_pdhdl_t pdhdl; 3141 tavor_fmrhdl_t fmrpoolhdl; 3142 int status; 3143 3144 TAVOR_TNF_ENTER(tavor_ci_create_fmr_pool); 3145 3146 /* Check for valid HCA handle */ 3147 if (hca == NULL) { 3148 TNF_PROBE_0(tavor_ci_create_fmr_pool_invhca_fail, 3149 TAVOR_TNF_ERROR, ""); 3150 TAVOR_TNF_EXIT(tavor_ci_create_fmr_pool); 3151 return (IBT_HCA_HDL_INVALID); 3152 } 3153 3154 state = (tavor_state_t *)hca; 3155 3156 /* Check if FMR is even supported */ 3157 if (state->ts_cfg_profile->cp_fmr_enable == 0) { 3158 TNF_PROBE_0(tavor_ci_create_fmr_pool_not_supported_fail, 3159 TAVOR_TNF_ERROR, ""); 3160 TAVOR_TNF_EXIT(tavor_ci_create_fmr_pool); 3161 return (IBT_HCA_FMR_NOT_SUPPORTED); 3162 } 3163 3164 /* Check for valid PD handle pointer */ 3165 if (pd == NULL) { 3166 TNF_PROBE_0(tavor_ci_create_fmr_invpdhdl_fail, 3167 TAVOR_TNF_ERROR, ""); 3168 TAVOR_TNF_EXIT(tavor_ci_create_fmr_pool); 3169 return (IBT_PD_HDL_INVALID); 3170 } 3171 3172 pdhdl = (tavor_pdhdl_t)pd; 3173 3174 /* 3175 * Validate the access flags. 
Both Remote Write and Remote Atomic 3176 * require the Local Write flag to be set 3177 */ 3178 if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) || 3179 (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) && 3180 !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) { 3181 TNF_PROBE_0(tavor_ci_create_fmr_pool_inv_accflags_fail, 3182 TAVOR_TNF_ERROR, ""); 3183 TAVOR_TNF_EXIT(tavor_ci_create_fmr_pool); 3184 return (IBT_MR_ACCESS_REQ_INVALID); 3185 } 3186 3187 status = tavor_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl); 3188 if (status != DDI_SUCCESS) { 3189 TNF_PROBE_1(tavor_ci_create_fmr_pool, TAVOR_TNF_ERROR, "", 3190 tnf_uint, status, status); 3191 TAVOR_TNF_EXIT(tavor_ci_create_fmr_pool); 3192 return (status); 3193 } 3194 3195 /* Set fmr_pool from tavor handle */ 3196 *fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl; 3197 3198 TAVOR_TNF_EXIT(tavor_ci_create_fmr_pool); 3199 3200 return (IBT_SUCCESS); 3201 } 3202 3203 /* 3204 * tavor_ci_destroy_fmr_pool() 3205 * Free all resources associated with an FMR pool. 3206 * Context: Can be called from base context only. 
3207 */ 3208 static ibt_status_t 3209 tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool) 3210 { 3211 tavor_state_t *state; 3212 tavor_fmrhdl_t fmrpoolhdl; 3213 int status; 3214 3215 TAVOR_TNF_ENTER(tavor_ci_destroy_fmr_pool); 3216 3217 /* Check for valid HCA handle */ 3218 if (hca == NULL) { 3219 TNF_PROBE_0(tavor_ci_destroy_fmr_pool_invhca_fail, 3220 TAVOR_TNF_ERROR, ""); 3221 TAVOR_TNF_EXIT(tavor_ci_create_fmr_pool); 3222 return (IBT_HCA_HDL_INVALID); 3223 } 3224 3225 state = (tavor_state_t *)hca; 3226 3227 /* Check for valid FMR Pool handle */ 3228 if (fmr_pool == NULL) { 3229 TNF_PROBE_0(tavor_ci_destroy_fmr_pool_invfmr_pool_fail, 3230 TAVOR_TNF_ERROR, ""); 3231 TAVOR_TNF_EXIT(tavor_ci_destroy_fmr_pool); 3232 return (IBT_FMR_POOL_HDL_INVALID); 3233 } 3234 3235 fmrpoolhdl = (tavor_fmrhdl_t)fmr_pool; 3236 3237 status = tavor_destroy_fmr_pool(state, fmrpoolhdl); 3238 if (status != DDI_SUCCESS) { 3239 TNF_PROBE_1(tavor_ci_destroy_fmr_pool, TAVOR_TNF_ERROR, "", 3240 tnf_uint, status, status); 3241 TAVOR_TNF_EXIT(tavor_ci_destroy_fmr_pool); 3242 return (status); 3243 } 3244 3245 TAVOR_TNF_EXIT(tavor_ci_destroy_fmr_pool); 3246 return (IBT_SUCCESS); 3247 } 3248 3249 /* 3250 * tavor_ci_flush_fmr_pool() 3251 * Force a flush of the memory tables, cleaning up used FMR resources. 3252 * Context: Can be called from interrupt or base context. 
3253 */ 3254 static ibt_status_t 3255 tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool) 3256 { 3257 tavor_state_t *state; 3258 tavor_fmrhdl_t fmrpoolhdl; 3259 int status; 3260 3261 TAVOR_TNF_ENTER(tavor_ci_flush_fmr_pool); 3262 3263 /* Check for valid HCA handle */ 3264 if (hca == NULL) { 3265 TNF_PROBE_0(tavor_ci_flush_fmr_pool_invhca_fail, 3266 TAVOR_TNF_ERROR, ""); 3267 TAVOR_TNF_EXIT(tavor_ci_flush_fmr_pool); 3268 return (IBT_HCA_HDL_INVALID); 3269 } 3270 3271 state = (tavor_state_t *)hca; 3272 3273 /* Check for valid FMR Pool handle */ 3274 if (fmr_pool == NULL) { 3275 TNF_PROBE_0(tavor_ci_flush_fmr_pool_invhca_fail, 3276 TAVOR_TNF_ERROR, ""); 3277 TAVOR_TNF_EXIT(tavor_ci_flush_fmr_pool); 3278 return (IBT_FMR_POOL_HDL_INVALID); 3279 } 3280 3281 fmrpoolhdl = (tavor_fmrhdl_t)fmr_pool; 3282 3283 status = tavor_flush_fmr_pool(state, fmrpoolhdl); 3284 if (status != DDI_SUCCESS) { 3285 TNF_PROBE_1(tavor_ci_flush_fmr_pool, TAVOR_TNF_ERROR, "", 3286 tnf_uint, status, status); 3287 TAVOR_TNF_EXIT(tavor_ci_flush_fmr_pool); 3288 return (status); 3289 } 3290 3291 TAVOR_TNF_EXIT(tavor_ci_flush_fmr_pool); 3292 return (IBT_SUCCESS); 3293 } 3294 3295 /* 3296 * tavor_ci_register_physical_fmr() 3297 * From the 'pool' of FMR regions passed in, performs register physical 3298 * operation. 3299 * Context: Can be called from interrupt or base context. 
3300 */ 3301 /* ARGSUSED */ 3302 static ibt_status_t 3303 tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca, 3304 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr, 3305 void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p) 3306 { 3307 tavor_state_t *state; 3308 tavor_mrhdl_t mrhdl; 3309 tavor_fmrhdl_t fmrpoolhdl; 3310 int status; 3311 3312 TAVOR_TNF_ENTER(tavor_ci_register_physical_fmr); 3313 3314 ASSERT(mem_pattr != NULL); 3315 ASSERT(mr_p != NULL); 3316 ASSERT(mem_desc_p != NULL); 3317 3318 /* Check for valid HCA handle */ 3319 if (hca == NULL) { 3320 TNF_PROBE_0(tavor_ci_register_physical_fmr_invhca_fail, 3321 TAVOR_TNF_ERROR, ""); 3322 TAVOR_TNF_EXIT(tavor_ci_register_physical_fmr); 3323 return (IBT_HCA_HDL_INVALID); 3324 } 3325 3326 /* Grab the Tavor softstate pointer */ 3327 state = (tavor_state_t *)hca; 3328 3329 /* Check for valid FMR Pool handle */ 3330 if (fmr_pool == NULL) { 3331 TNF_PROBE_0(tavor_ci_register_physical_fmr_invhca_fail, 3332 TAVOR_TNF_ERROR, ""); 3333 TAVOR_TNF_EXIT(tavor_ci_register_physical_fmr); 3334 return (IBT_FMR_POOL_HDL_INVALID); 3335 } 3336 3337 fmrpoolhdl = (tavor_fmrhdl_t)fmr_pool; 3338 3339 status = tavor_register_physical_fmr(state, fmrpoolhdl, mem_pattr, 3340 &mrhdl, mem_desc_p); 3341 if (status != DDI_SUCCESS) { 3342 TNF_PROBE_1(tavor_ci_register_physical_fmr_reg_fail, 3343 TAVOR_TNF_ERROR, "", tnf_uint, status, status); 3344 TAVOR_TNF_EXIT(tavor_ci_register_physical_fmr); 3345 return (status); 3346 } 3347 3348 /* 3349 * If region is mapped for streaming (i.e. noncoherent), then set 3350 * sync is required 3351 */ 3352 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p)) 3353 mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags & 3354 IBT_MR_NONCOHERENT) ? 
B_TRUE : B_FALSE; 3355 if (mem_desc_p->pmd_sync_required == B_TRUE) { 3356 /* Fill in DMA handle for future sync operations */ 3357 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo)) 3358 mrhdl->mr_bindinfo.bi_dmahdl = 3359 (ddi_dma_handle_t)mem_pattr->pmr_ma; 3360 } 3361 3362 /* Return the Tavor MR handle */ 3363 *mr_p = (ibc_mr_hdl_t)mrhdl; 3364 3365 TAVOR_TNF_EXIT(tavor_ci_register_physical_fmr); 3366 return (IBT_SUCCESS); 3367 } 3368 3369 /* 3370 * tavor_ci_deregister_fmr() 3371 * Moves an FMR (specified by 'mr') to the deregistered state. 3372 * Context: Can be called from base context only. 3373 */ 3374 static ibt_status_t 3375 tavor_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr) 3376 { 3377 tavor_state_t *state; 3378 tavor_mrhdl_t mrhdl; 3379 int status; 3380 3381 TAVOR_TNF_ENTER(tavor_ci_deregister_fmr); 3382 3383 /* Check for valid HCA handle */ 3384 if (hca == NULL) { 3385 TNF_PROBE_0(tavor_ci_deregister_fmr_invhca_fail, 3386 TAVOR_TNF_ERROR, ""); 3387 TAVOR_TNF_EXIT(tavor_ci_deregister_fmr); 3388 return (IBT_HCA_HDL_INVALID); 3389 } 3390 3391 /* Check for valid memory region handle */ 3392 if (mr == NULL) { 3393 TNF_PROBE_0(tavor_ci_deregister_fmr_invmrhdl_fail, 3394 TAVOR_TNF_ERROR, ""); 3395 TAVOR_TNF_EXIT(tavor_ci_deregister_fmr); 3396 return (IBT_MR_HDL_INVALID); 3397 } 3398 3399 /* Grab the Tavor softstate pointer */ 3400 state = (tavor_state_t *)hca; 3401 mrhdl = (tavor_mrhdl_t)mr; 3402 3403 /* 3404 * Deregister the memory region, either "unmap" the FMR or deregister 3405 * the normal memory region. 
3406 */ 3407 status = tavor_deregister_fmr(state, mrhdl); 3408 if (status != DDI_SUCCESS) { 3409 TNF_PROBE_1(tavor_ci_deregister_mr_fmr_fail, 3410 TAVOR_TNF_ERROR, "", tnf_uint, status, status); 3411 TAVOR_TNF_EXIT(tavor_ci_deregister_fmr); 3412 return (status); 3413 } 3414 3415 TAVOR_TNF_EXIT(tavor_ci_deregister_fmr); 3416 return (IBT_SUCCESS); 3417 } 3418 3419 /* 3420 * tavor_ci_alloc_io_mem() 3421 * Allocate dmable memory 3422 * 3423 */ 3424 ibt_status_t 3425 tavor_ci_alloc_io_mem( 3426 ibc_hca_hdl_t hca, 3427 size_t size, 3428 ibt_mr_flags_t mr_flag, 3429 caddr_t *kaddrp, 3430 ibc_mem_alloc_hdl_t *mem_alloc_hdl) 3431 { 3432 tavor_state_t *state; 3433 int status; 3434 3435 TAVOR_TNF_ENTER(tavor_ci_alloc_io_mem); 3436 3437 /* Check for valid HCA handle */ 3438 if (hca == NULL) { 3439 TNF_PROBE_0(tavor_ci_alloc_io_mem_invhca_fail, 3440 TAVOR_TNF_ERROR, ""); 3441 TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem); 3442 return (IBT_HCA_HDL_INVALID); 3443 } 3444 3445 /* Check for valid mem_alloc_hdl handle pointer */ 3446 if (mem_alloc_hdl == NULL) { 3447 TNF_PROBE_0(tavor_ci_alloc_io_mem_hdl_fail, 3448 TAVOR_TNF_ERROR, ""); 3449 TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem); 3450 return (IBT_MEM_ALLOC_HDL_INVALID); 3451 } 3452 3453 /* Grab the Tavor softstate pointer and mem handle */ 3454 state = (tavor_state_t *)hca; 3455 3456 /* Allocate the AH */ 3457 status = tavor_mem_alloc(state, size, mr_flag, kaddrp, 3458 (tavor_mem_alloc_hdl_t *)mem_alloc_hdl); 3459 3460 if (status != DDI_SUCCESS) { 3461 TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "", 3462 tnf_uint, status, status); 3463 TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem); 3464 return (status); 3465 } 3466 3467 TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem); 3468 return (IBT_SUCCESS); 3469 } 3470 3471 3472 /* 3473 * tavor_ci_free_io_mem() 3474 * free the memory 3475 */ 3476 ibt_status_t 3477 tavor_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl) 3478 { 3479 tavor_mem_alloc_hdl_t memhdl; 3480 3481 
TAVOR_TNF_ENTER(tavor_ci_free_io_mem); 3482 3483 /* Check for valid HCA handle */ 3484 if (hca == NULL) { 3485 TNF_PROBE_0(tavor_ci_free_io_mem_invhca_fail, 3486 TAVOR_TNF_ERROR, ""); 3487 TAVOR_TNF_EXIT(tavor_ci_free_io_mem); 3488 return (IBT_HCA_HDL_INVALID); 3489 } 3490 3491 /* Check for valid mem_alloc_hdl handle pointer */ 3492 if (mem_alloc_hdl == NULL) { 3493 TNF_PROBE_0(tavor_ci_free_io_mem_hdl_fail, 3494 TAVOR_TNF_ERROR, ""); 3495 TAVOR_TNF_EXIT(tavor_ci_free_io_mem); 3496 return (IBT_MEM_ALLOC_HDL_INVALID); 3497 } 3498 3499 memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl; 3500 3501 /* free the memory */ 3502 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*memhdl)) 3503 ddi_dma_mem_free(&memhdl->tavor_acc_hdl); 3504 ddi_dma_free_handle(&memhdl->tavor_dma_hdl); 3505 3506 kmem_free(memhdl, sizeof (*memhdl)); 3507 TAVOR_TNF_EXIT(tavor_dma_free); 3508 return (IBT_SUCCESS); 3509 } 3510 3511 3512 int 3513 tavor_mem_alloc( 3514 tavor_state_t *state, 3515 size_t size, 3516 ibt_mr_flags_t flags, 3517 caddr_t *kaddrp, 3518 tavor_mem_alloc_hdl_t *mem_hdl) 3519 { 3520 ddi_dma_handle_t dma_hdl; 3521 ddi_dma_attr_t dma_attr; 3522 ddi_acc_handle_t acc_hdl; 3523 size_t real_len; 3524 int status; 3525 int (*ddi_cb)(caddr_t); 3526 3527 TAVOR_TNF_ENTER(tavor_mem_alloc); 3528 3529 tavor_dma_attr_init(&dma_attr); 3530 3531 ddi_cb = (flags & IBT_MR_NOSLEEP) ? 
DDI_DMA_DONTWAIT : DDI_DMA_SLEEP; 3532 3533 /* Allocate a DMA handle */ 3534 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, ddi_cb, 3535 NULL, &dma_hdl); 3536 if (status != DDI_SUCCESS) { 3537 TNF_PROBE_0(tavor_dma_alloc_handle_fail, TAVOR_TNF_ERROR, ""); 3538 TAVOR_TNF_EXIT(tavor_mem_alloc); 3539 return (DDI_FAILURE); 3540 } 3541 3542 /* Allocate DMA memory */ 3543 status = ddi_dma_mem_alloc(dma_hdl, size, 3544 &state->ts_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb, 3545 NULL, 3546 kaddrp, &real_len, &acc_hdl); 3547 if (status != DDI_SUCCESS) { 3548 ddi_dma_free_handle(&dma_hdl); 3549 TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, ""); 3550 TAVOR_TNF_EXIT(tavor_mem_alloc); 3551 return (DDI_FAILURE); 3552 } 3553 3554 /* Package the tavor_dma_info contents and return */ 3555 *mem_hdl = kmem_alloc(sizeof (**mem_hdl), 3556 flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP); 3557 if (*mem_hdl == NULL) { 3558 ddi_dma_mem_free(&acc_hdl); 3559 ddi_dma_free_handle(&dma_hdl); 3560 TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, ""); 3561 TAVOR_TNF_EXIT(tavor_mem_alloc); 3562 return (DDI_FAILURE); 3563 } 3564 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(**mem_hdl)) 3565 (*mem_hdl)->tavor_dma_hdl = dma_hdl; 3566 (*mem_hdl)->tavor_acc_hdl = acc_hdl; 3567 3568 TAVOR_TNF_EXIT(tavor_mem_alloc); 3569 return (DDI_SUCCESS); 3570 } 3571