1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * tavor_ci.c
29  *    Tavor Channel Interface (CI) Routines
30  *
31  *    Implements all the routines necessary to interface with the IBTF.
32  *    Pointers to all of these functions are passed to the IBTF at attach()
33  *    time in the ibc_operations_t structure.  These functions include all
34  *    of the necessary routines to implement the required InfiniBand "verbs"
35  *    and additional IBTF-specific interfaces.
36  */
37 
38 #include <sys/types.h>
39 #include <sys/conf.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 
43 #include <sys/ib/adapters/tavor/tavor.h>
44 
45 /* HCA and port related operations */
46 static ibt_status_t tavor_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
47     ibt_hca_portinfo_t *);
48 static ibt_status_t tavor_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
49     ibt_port_modify_flags_t, uint8_t);
50 static ibt_status_t tavor_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);
51 
52 /* Protection Domains */
53 static ibt_status_t tavor_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
54     ibc_pd_hdl_t *);
55 static ibt_status_t tavor_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);
56 
57 /* Reliable Datagram Domains */
58 static ibt_status_t tavor_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
59     ibc_rdd_hdl_t *);
60 static ibt_status_t tavor_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);
61 
62 /* Address Handles */
63 static ibt_status_t tavor_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
64     ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
65 static ibt_status_t tavor_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
66 static ibt_status_t tavor_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
67     ibc_pd_hdl_t *, ibt_adds_vect_t *);
68 static ibt_status_t tavor_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
69     ibt_adds_vect_t *);
70 
71 /* Queue Pairs */
72 static ibt_status_t tavor_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
73     ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
74     ibc_qp_hdl_t *);
75 static ibt_status_t tavor_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
76     ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
77     ibt_chan_sizes_t *, ibc_qp_hdl_t *);
78 static ibt_status_t tavor_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
79     ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
80     ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
81 static ibt_status_t tavor_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
82     ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
83 static ibt_status_t tavor_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
84 static ibt_status_t tavor_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
85     ibt_qp_query_attr_t *);
86 static ibt_status_t tavor_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
87     ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);
88 
89 /* Completion Queues */
90 static ibt_status_t tavor_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
91     ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
92 static ibt_status_t tavor_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
93 static ibt_status_t tavor_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, uint_t *,
94     uint_t *, uint_t *, ibt_cq_handler_id_t *);
95 static ibt_status_t tavor_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
96     uint_t, uint_t *);
97 static ibt_status_t tavor_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
98     uint_t, uint_t, ibt_cq_handler_id_t);
99 static ibt_status_t tavor_ci_alloc_cq_sched(ibc_hca_hdl_t, ibt_cq_sched_flags_t,
100     ibc_cq_handler_attr_t *);
101 static ibt_status_t tavor_ci_free_cq_sched(ibc_hca_hdl_t, ibt_cq_handler_id_t);
102 
103 /* EE Contexts */
104 static ibt_status_t tavor_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
105     ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
106 static ibt_status_t tavor_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
107 static ibt_status_t tavor_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
108     ibt_eec_query_attr_t *);
109 static ibt_status_t tavor_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
110     ibt_cep_modify_flags_t, ibt_eec_info_t *);
111 
112 /* Memory Registration */
113 static ibt_status_t tavor_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
114     ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
115 static ibt_status_t tavor_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
116     ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
117 static ibt_status_t tavor_ci_register_shared_mr(ibc_hca_hdl_t,
118     ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
119     ibc_mr_hdl_t *, ibt_mr_desc_t *);
120 static ibt_status_t tavor_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
121 static ibt_status_t tavor_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
122     ibt_mr_query_attr_t *);
123 static ibt_status_t tavor_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
124     ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
125     ibt_mr_desc_t *);
126 static ibt_status_t tavor_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
127     ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
128     ibt_mr_desc_t *);
129 static ibt_status_t tavor_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
130 
131 /* Memory Windows */
132 static ibt_status_t tavor_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
133     ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
134 static ibt_status_t tavor_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
135 static ibt_status_t tavor_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
136     ibt_mw_query_attr_t *);
137 
138 /* Multicast Groups */
139 static ibt_status_t tavor_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
140     ib_gid_t, ib_lid_t);
141 static ibt_status_t tavor_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
142     ib_gid_t, ib_lid_t);
143 
144 /* Work Request and Completion Processing */
145 static ibt_status_t tavor_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
146     ibt_send_wr_t *, uint_t, uint_t *);
147 static ibt_status_t tavor_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
148     ibt_recv_wr_t *, uint_t, uint_t *);
149 static ibt_status_t tavor_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
150     ibt_wc_t *, uint_t, uint_t *);
151 static ibt_status_t tavor_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
152     ibt_cq_notify_flags_t);
153 
154 /* CI Object Private Data */
155 static ibt_status_t tavor_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
156     ibt_object_type_t, void *, void *, size_t);
157 
/* CI Object Private Data (outbound direction) */
159 static ibt_status_t tavor_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
160     ibt_object_type_t, void *, void *, size_t);
161 
162 /* Shared Receive Queues */
163 static ibt_status_t tavor_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
164     ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
165     ibt_srq_sizes_t *);
166 static ibt_status_t tavor_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
167 static ibt_status_t tavor_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
168     ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
169 static ibt_status_t tavor_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
170     ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
171 static ibt_status_t tavor_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
172     ibt_recv_wr_t *, uint_t, uint_t *);
173 
174 /* Address translation */
175 static ibt_status_t tavor_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
176     void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
177 static ibt_status_t tavor_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
178 static ibt_status_t tavor_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
179     ibt_all_wr_t *, ibc_mi_hdl_t *);
180 static ibt_status_t tavor_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);
181 
182 /* Allocate L_Key */
183 static ibt_status_t tavor_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
184     ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
185 
186 /* Physical Register Memory Region */
187 static ibt_status_t tavor_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
188     ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
189 static ibt_status_t tavor_ci_reregister_physical_mr(ibc_hca_hdl_t,
190     ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
191     ibt_pmr_desc_t *);
192 
193 /* Mellanox FMR */
194 static ibt_status_t tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
195     ibt_fmr_pool_attr_t *fmr_params, ibc_fmr_pool_hdl_t *fmr_pool);
196 static ibt_status_t tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
197     ibc_fmr_pool_hdl_t fmr_pool);
198 static ibt_status_t tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
199     ibc_fmr_pool_hdl_t fmr_pool);
200 static ibt_status_t tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
201     ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
202     void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
203 static ibt_status_t tavor_ci_deregister_fmr(ibc_hca_hdl_t hca,
204     ibc_mr_hdl_t mr);
205 
206 static ibt_status_t tavor_ci_alloc_io_mem(ibc_hca_hdl_t, size_t,
207     ibt_mr_flags_t, caddr_t *, ibc_mem_alloc_hdl_t *);
208 static ibt_status_t tavor_ci_free_io_mem(ibc_hca_hdl_t, ibc_mem_alloc_hdl_t);
209 static int tavor_mem_alloc(tavor_state_t *, size_t, ibt_mr_flags_t,
210 	caddr_t *, tavor_mem_alloc_hdl_t *);
211 
212 
213 /*
214  * This ibc_operations_t structure includes pointers to all the entry points
215  * provided by the Tavor driver.  This structure is passed to the IBTF at
216  * driver attach time, using the ibc_attach() call.
217  */
218 ibc_operations_t tavor_ibc_ops = {
219 	/* HCA and port related operations */
220 	tavor_ci_query_hca_ports,
221 	tavor_ci_modify_ports,
222 	tavor_ci_modify_system_image,
223 
224 	/* Protection Domains */
225 	tavor_ci_alloc_pd,
226 	tavor_ci_free_pd,
227 
228 	/* Reliable Datagram Domains */
229 	tavor_ci_alloc_rdd,
230 	tavor_ci_free_rdd,
231 
232 	/* Address Handles */
233 	tavor_ci_alloc_ah,
234 	tavor_ci_free_ah,
235 	tavor_ci_query_ah,
236 	tavor_ci_modify_ah,
237 
238 	/* Queue Pairs */
239 	tavor_ci_alloc_qp,
240 	tavor_ci_alloc_special_qp,
241 	tavor_ci_alloc_qp_range,
242 	tavor_ci_free_qp,
243 	tavor_ci_release_qpn,
244 	tavor_ci_query_qp,
245 	tavor_ci_modify_qp,
246 
247 	/* Completion Queues */
248 	tavor_ci_alloc_cq,
249 	tavor_ci_free_cq,
250 	tavor_ci_query_cq,
251 	tavor_ci_resize_cq,
252 	tavor_ci_modify_cq,
253 	tavor_ci_alloc_cq_sched,
254 	tavor_ci_free_cq_sched,
255 
256 	/* EE Contexts */
257 	tavor_ci_alloc_eec,
258 	tavor_ci_free_eec,
259 	tavor_ci_query_eec,
260 	tavor_ci_modify_eec,
261 
262 	/* Memory Registration */
263 	tavor_ci_register_mr,
264 	tavor_ci_register_buf,
265 	tavor_ci_register_shared_mr,
266 	tavor_ci_deregister_mr,
267 	tavor_ci_query_mr,
268 	tavor_ci_reregister_mr,
269 	tavor_ci_reregister_buf,
270 	tavor_ci_sync_mr,
271 
272 	/* Memory Windows */
273 	tavor_ci_alloc_mw,
274 	tavor_ci_free_mw,
275 	tavor_ci_query_mw,
276 
277 	/* Multicast Groups */
278 	tavor_ci_attach_mcg,
279 	tavor_ci_detach_mcg,
280 
281 	/* Work Request and Completion Processing */
282 	tavor_ci_post_send,
283 	tavor_ci_post_recv,
284 	tavor_ci_poll_cq,
285 	tavor_ci_notify_cq,
286 
287 	/* CI Object Mapping Data */
288 	tavor_ci_ci_data_in,
289 	tavor_ci_ci_data_out,
290 
291 	/* Shared Receive Queue */
292 	tavor_ci_alloc_srq,
293 	tavor_ci_free_srq,
294 	tavor_ci_query_srq,
295 	tavor_ci_modify_srq,
296 	tavor_ci_post_srq,
297 
298 	/* Address translation */
299 	tavor_ci_map_mem_area,
300 	tavor_ci_unmap_mem_area,
301 	tavor_ci_map_mem_iov,
302 	tavor_ci_unmap_mem_iov,
303 
304 	/* Allocate L_key */
305 	tavor_ci_alloc_lkey,
306 
307 	/* Physical Register Memory Region */
308 	tavor_ci_register_physical_mr,
309 	tavor_ci_reregister_physical_mr,
310 
311 	/* Mellanox FMR */
312 	tavor_ci_create_fmr_pool,
313 	tavor_ci_destroy_fmr_pool,
314 	tavor_ci_flush_fmr_pool,
315 	tavor_ci_register_physical_fmr,
316 	tavor_ci_deregister_fmr,
317 
318 	/* dmable memory */
319 	tavor_ci_alloc_io_mem,
320 	tavor_ci_free_io_mem
321 };
322 
323 
324 /*
325  * tavor_ci_query_hca_ports()
326  *    Returns HCA port attributes for either one or all of the HCA's ports.
327  *    Context: Can be called only from user or kernel context.
328  */
329 static ibt_status_t
330 tavor_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
331     ibt_hca_portinfo_t *info_p)
332 {
333 	tavor_state_t	*state;
334 	uint_t		start, end, port;
335 	int		status, indx;
336 
337 	TAVOR_TNF_ENTER(tavor_ci_query_hca_ports);
338 
339 	/* Check for valid HCA handle */
340 	if (hca == NULL) {
341 		TNF_PROBE_0(tavor_ci_query_hca_ports_invhca_fail,
342 		    TAVOR_TNF_ERROR, "");
343 		TAVOR_TNF_EXIT(tavor_ci_query_port);
344 		return (IBT_HCA_HDL_INVALID);
345 	}
346 
347 	/* Grab the Tavor softstate pointer */
348 	state = (tavor_state_t *)hca;
349 
350 	/*
351 	 * If the specified port is zero, then we are supposed to query all
352 	 * ports.  Otherwise, we query only the port number specified.
353 	 * Setup the start and end port numbers as appropriate for the loop
354 	 * below.  Note:  The first Tavor port is port number one (1).
355 	 */
356 	if (query_port == 0) {
357 		start = 1;
358 		end = start + (state->ts_cfg_profile->cp_num_ports - 1);
359 	} else {
360 		end = start = query_port;
361 	}
362 
363 	/* Query the port(s) */
364 	for (port = start, indx = 0; port <= end; port++, indx++) {
365 		status = tavor_port_query(state, port, &info_p[indx]);
366 		if (status != DDI_SUCCESS) {
367 			TNF_PROBE_1(tavor_port_query_fail, TAVOR_TNF_ERROR,
368 			    "", tnf_uint, status, status);
369 			TAVOR_TNF_EXIT(tavor_ci_query_hca_ports);
370 			return (status);
371 		}
372 	}
373 
374 	TAVOR_TNF_EXIT(tavor_ci_query_hca_ports);
375 	return (IBT_SUCCESS);
376 }
377 
378 
379 /*
380  * tavor_ci_modify_ports()
381  *    Modify HCA port attributes
382  *    Context: Can be called only from user or kernel context.
383  */
384 static ibt_status_t
385 tavor_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
386     ibt_port_modify_flags_t flags, uint8_t init_type)
387 {
388 	tavor_state_t	*state;
389 	int		status;
390 
391 	TAVOR_TNF_ENTER(tavor_ci_modify_ports);
392 
393 	/* Check for valid HCA handle */
394 	if (hca == NULL) {
395 		TNF_PROBE_0(tavor_ci_modify_ports_invhca_fail,
396 		    TAVOR_TNF_ERROR, "");
397 		TAVOR_TNF_EXIT(tavor_ci_modify_ports);
398 		return (IBT_HCA_HDL_INVALID);
399 	}
400 
401 	/* Grab the Tavor softstate pointer */
402 	state = (tavor_state_t *)hca;
403 
404 	/* Modify the port(s) */
405 	status = tavor_port_modify(state, port, flags, init_type);
406 	if (status != DDI_SUCCESS) {
407 		TNF_PROBE_1(tavor_ci_modify_ports_fail,
408 		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
409 		TAVOR_TNF_EXIT(tavor_ci_modify_ports);
410 		return (status);
411 	}
412 
413 	TAVOR_TNF_EXIT(tavor_ci_modify_ports);
414 	return (IBT_SUCCESS);
415 }
416 
417 /*
418  * tavor_ci_modify_system_image()
419  *    Modify the System Image GUID
420  *    Context: Can be called only from user or kernel context.
421  */
422 /* ARGSUSED */
423 static ibt_status_t
424 tavor_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
425 {
426 	TAVOR_TNF_ENTER(tavor_ci_modify_system_image);
427 
428 	/*
429 	 * This is an unsupported interface for the Tavor driver.  This
430 	 * interface is necessary to support modification of the System
431 	 * Image GUID.  Tavor is only capable of modifying this parameter
432 	 * once (during driver initialization).
433 	 */
434 
435 	TAVOR_TNF_EXIT(tavor_ci_modify_system_image);
436 	return (IBT_NOT_SUPPORTED);
437 }
438 
439 /*
440  * tavor_ci_alloc_pd()
441  *    Allocate a Protection Domain
442  *    Context: Can be called only from user or kernel context.
443  */
444 /* ARGSUSED */
445 static ibt_status_t
446 tavor_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
447 {
448 	tavor_state_t	*state;
449 	tavor_pdhdl_t	pdhdl;
450 	int		status;
451 
452 	TAVOR_TNF_ENTER(tavor_ci_alloc_pd);
453 
454 	ASSERT(pd_p != NULL);
455 
456 	/* Check for valid HCA handle */
457 	if (hca == NULL) {
458 		TNF_PROBE_0(tavor_ci_alloc_pd_invhca_fail,
459 		    TAVOR_TNF_ERROR, "");
460 		TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
461 		return (IBT_HCA_HDL_INVALID);
462 	}
463 
464 	/* Grab the Tavor softstate pointer */
465 	state = (tavor_state_t *)hca;
466 
467 	/* Allocate the PD */
468 	status = tavor_pd_alloc(state, &pdhdl, TAVOR_NOSLEEP);
469 	if (status != DDI_SUCCESS) {
470 		TNF_PROBE_1(tavor_ci_alloc_pd_fail, TAVOR_TNF_ERROR, "",
471 		    tnf_uint, status, status);
472 		TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
473 		return (status);
474 	}
475 
476 	/* Return the Tavor PD handle */
477 	*pd_p = (ibc_pd_hdl_t)pdhdl;
478 
479 	TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
480 	return (IBT_SUCCESS);
481 }
482 
483 
484 /*
485  * tavor_ci_free_pd()
486  *    Free a Protection Domain
487  *    Context: Can be called only from user or kernel context
488  */
489 static ibt_status_t
490 tavor_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
491 {
492 	tavor_state_t		*state;
493 	tavor_pdhdl_t		pdhdl;
494 	int			status;
495 
496 	TAVOR_TNF_ENTER(tavor_ci_free_pd);
497 
498 	/* Check for valid HCA handle */
499 	if (hca == NULL) {
500 		TNF_PROBE_0(tavor_ci_free_pd_invhca_fail,
501 		    TAVOR_TNF_ERROR, "");
502 		TAVOR_TNF_EXIT(tavor_ci_free_pd);
503 		return (IBT_HCA_HDL_INVALID);
504 	}
505 
506 	/* Check for valid PD handle pointer */
507 	if (pd == NULL) {
508 		TNF_PROBE_0(tavor_ci_free_pd_invpdhdl_fail,
509 		    TAVOR_TNF_ERROR, "");
510 		TAVOR_TNF_EXIT(tavor_ci_free_pd);
511 		return (IBT_PD_HDL_INVALID);
512 	}
513 
514 	/* Grab the Tavor softstate pointer and PD handle */
515 	state = (tavor_state_t *)hca;
516 	pdhdl = (tavor_pdhdl_t)pd;
517 
518 	/* Free the PD */
519 	status = tavor_pd_free(state, &pdhdl);
520 	if (status != DDI_SUCCESS) {
521 		TNF_PROBE_1(tavor_ci_free_pd_fail, TAVOR_TNF_ERROR, "",
522 		    tnf_uint, status, status);
523 		TAVOR_TNF_EXIT(tavor_ci_free_pd);
524 		return (status);
525 	}
526 
527 	TAVOR_TNF_EXIT(tavor_ci_free_pd);
528 	return (IBT_SUCCESS);
529 }
530 
531 
532 /*
533  * tavor_ci_alloc_rdd()
534  *    Allocate a Reliable Datagram Domain
535  *    Context: Can be called only from user or kernel context.
536  */
537 /* ARGSUSED */
538 static ibt_status_t
539 tavor_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
540     ibc_rdd_hdl_t *rdd_p)
541 {
542 	TAVOR_TNF_ENTER(tavor_ci_alloc_rdd);
543 
544 	/*
545 	 * This is an unsupported interface for the Tavor driver.  This
546 	 * interface is necessary to support Reliable Datagram (RD)
547 	 * operations.  Tavor does not support RD.
548 	 */
549 
550 	TAVOR_TNF_EXIT(tavor_ci_alloc_rdd);
551 	return (IBT_NOT_SUPPORTED);
552 }
553 
554 
555 /*
556  * tavor_free_rdd()
557  *    Free a Reliable Datagram Domain
558  *    Context: Can be called only from user or kernel context.
559  */
560 /* ARGSUSED */
561 static ibt_status_t
562 tavor_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
563 {
564 	TAVOR_TNF_ENTER(tavor_ci_free_rdd);
565 
566 	/*
567 	 * This is an unsupported interface for the Tavor driver.  This
568 	 * interface is necessary to support Reliable Datagram (RD)
569 	 * operations.  Tavor does not support RD.
570 	 */
571 
572 	TAVOR_TNF_EXIT(tavor_ci_free_rdd);
573 	return (IBT_NOT_SUPPORTED);
574 }
575 
576 
577 /*
578  * tavor_ci_alloc_ah()
579  *    Allocate an Address Handle
580  *    Context: Can be called only from user or kernel context.
581  */
582 /* ARGSUSED */
583 static ibt_status_t
584 tavor_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
585     ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
586 {
587 	tavor_state_t	*state;
588 	tavor_ahhdl_t	ahhdl;
589 	tavor_pdhdl_t	pdhdl;
590 	int		status;
591 
592 	TAVOR_TNF_ENTER(tavor_ci_alloc_ah);
593 
594 	/* Check for valid HCA handle */
595 	if (hca == NULL) {
596 		TNF_PROBE_0(tavor_ci_alloc_ah_invhca_fail,
597 		    TAVOR_TNF_ERROR, "");
598 		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
599 		return (IBT_HCA_HDL_INVALID);
600 	}
601 
602 	/* Check for valid PD handle pointer */
603 	if (pd == NULL) {
604 		TNF_PROBE_0(tavor_ci_alloc_ah_invpdhdl_fail,
605 		    TAVOR_TNF_ERROR, "");
606 		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
607 		return (IBT_PD_HDL_INVALID);
608 	}
609 
610 	/* Grab the Tavor softstate pointer and PD handle */
611 	state = (tavor_state_t *)hca;
612 	pdhdl = (tavor_pdhdl_t)pd;
613 
614 	/* Allocate the AH */
615 	status = tavor_ah_alloc(state, pdhdl, attr_p, &ahhdl, TAVOR_NOSLEEP);
616 	if (status != DDI_SUCCESS) {
617 		TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "",
618 		    tnf_uint, status, status);
619 		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
620 		return (status);
621 	}
622 
623 	/* Return the Tavor AH handle */
624 	*ah_p = (ibc_ah_hdl_t)ahhdl;
625 
626 	TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
627 	return (IBT_SUCCESS);
628 }
629 
630 
631 /*
632  * tavor_ci_free_ah()
633  *    Free an Address Handle
634  *    Context: Can be called only from user or kernel context.
635  */
636 static ibt_status_t
637 tavor_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
638 {
639 	tavor_state_t	*state;
640 	tavor_ahhdl_t	ahhdl;
641 	int		status;
642 
643 	TAVOR_TNF_ENTER(tavor_ci_free_ah);
644 
645 	/* Check for valid HCA handle */
646 	if (hca == NULL) {
647 		TNF_PROBE_0(tavor_ci_free_ah_invhca_fail,
648 		    TAVOR_TNF_ERROR, "");
649 		TAVOR_TNF_EXIT(tavor_ci_free_ah);
650 		return (IBT_HCA_HDL_INVALID);
651 	}
652 
653 	/* Check for valid address handle pointer */
654 	if (ah == NULL) {
655 		TNF_PROBE_0(tavor_ci_free_ah_invahhdl_fail,
656 		    TAVOR_TNF_ERROR, "");
657 		TAVOR_TNF_EXIT(tavor_ci_free_ah);
658 		return (IBT_AH_HDL_INVALID);
659 	}
660 
661 	/* Grab the Tavor softstate pointer and AH handle */
662 	state = (tavor_state_t *)hca;
663 	ahhdl = (tavor_ahhdl_t)ah;
664 
665 	/* Free the AH */
666 	status = tavor_ah_free(state, &ahhdl, TAVOR_NOSLEEP);
667 	if (status != DDI_SUCCESS) {
668 		TNF_PROBE_1(tavor_ci_free_ah_fail, TAVOR_TNF_ERROR, "",
669 		    tnf_uint, status, status);
670 		TAVOR_TNF_EXIT(tavor_ci_free_ah);
671 		return (status);
672 	}
673 
674 	TAVOR_TNF_EXIT(tavor_ci_free_ah);
675 	return (IBT_SUCCESS);
676 }
677 
678 
679 /*
680  * tavor_ci_query_ah()
681  *    Return the Address Vector information for a specified Address Handle
682  *    Context: Can be called from interrupt or base context.
683  */
684 static ibt_status_t
685 tavor_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
686     ibt_adds_vect_t *attr_p)
687 {
688 	tavor_state_t	*state;
689 	tavor_ahhdl_t	ahhdl;
690 	tavor_pdhdl_t	pdhdl;
691 	int		status;
692 
693 	TAVOR_TNF_ENTER(tavor_ci_query_ah);
694 
695 	/* Check for valid HCA handle */
696 	if (hca == NULL) {
697 		TNF_PROBE_0(tavor_ci_query_ah_invhca_fail,
698 		    TAVOR_TNF_ERROR, "");
699 		TAVOR_TNF_EXIT(tavor_ci_query_ah);
700 		return (IBT_HCA_HDL_INVALID);
701 	}
702 
703 	/* Check for valid address handle pointer */
704 	if (ah == NULL) {
705 		TNF_PROBE_0(tavor_ci_query_ah_invahhdl_fail,
706 		    TAVOR_TNF_ERROR, "");
707 		TAVOR_TNF_EXIT(tavor_ci_query_ah);
708 		return (IBT_AH_HDL_INVALID);
709 	}
710 
711 	/* Grab the Tavor softstate pointer and AH handle */
712 	state = (tavor_state_t *)hca;
713 	ahhdl = (tavor_ahhdl_t)ah;
714 
715 	/* Query the AH */
716 	status = tavor_ah_query(state, ahhdl, &pdhdl, attr_p);
717 	if (status != DDI_SUCCESS) {
718 		TNF_PROBE_1(tavor_ci_query_ah_fail, TAVOR_TNF_ERROR, "",
719 		    tnf_uint, status, status);
720 		TAVOR_TNF_EXIT(tavor_ci_query_ah);
721 		return (status);
722 	}
723 
724 	/* Return the Tavor PD handle */
725 	*pd_p = (ibc_pd_hdl_t)pdhdl;
726 
727 	TAVOR_TNF_EXIT(tavor_ci_query_ah);
728 	return (IBT_SUCCESS);
729 }
730 
731 
732 /*
733  * tavor_ci_modify_ah()
734  *    Modify the Address Vector information of a specified Address Handle
735  *    Context: Can be called from interrupt or base context.
736  */
737 static ibt_status_t
738 tavor_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
739 {
740 	tavor_state_t	*state;
741 	tavor_ahhdl_t	ahhdl;
742 	int		status;
743 
744 	TAVOR_TNF_ENTER(tavor_ci_modify_ah);
745 
746 	/* Check for valid HCA handle */
747 	if (hca == NULL) {
748 		TNF_PROBE_0(tavor_ci_modify_ah_invhca_fail,
749 		    TAVOR_TNF_ERROR, "");
750 		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
751 		return (IBT_HCA_HDL_INVALID);
752 	}
753 
754 	/* Check for valid address handle pointer */
755 	if (ah == NULL) {
756 		TNF_PROBE_0(tavor_ci_modify_ah_invahhdl_fail,
757 		    TAVOR_TNF_ERROR, "");
758 		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
759 		return (IBT_AH_HDL_INVALID);
760 	}
761 
762 	/* Grab the Tavor softstate pointer and AH handle */
763 	state = (tavor_state_t *)hca;
764 	ahhdl = (tavor_ahhdl_t)ah;
765 
766 	/* Modify the AH */
767 	status = tavor_ah_modify(state, ahhdl, attr_p);
768 	if (status != DDI_SUCCESS) {
769 		TNF_PROBE_1(tavor_ci_modify_ah_fail, TAVOR_TNF_ERROR, "",
770 		    tnf_uint, status, status);
771 		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
772 		return (status);
773 	}
774 
775 	TAVOR_TNF_EXIT(tavor_ci_modify_ah);
776 	return (IBT_SUCCESS);
777 }
778 
779 
780 /*
781  * tavor_ci_alloc_qp()
782  *    Allocate a Queue Pair
783  *    Context: Can be called only from user or kernel context.
784  */
785 static ibt_status_t
786 tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
787     ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
788     ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
789 {
790 	tavor_state_t		*state;
791 	tavor_qp_info_t		qpinfo;
792 	tavor_qp_options_t	op;
793 	int			status;
794 
795 	TAVOR_TNF_ENTER(tavor_ci_alloc_qp);
796 
797 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
798 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
799 
800 	/* Check for valid HCA handle */
801 	if (hca == NULL) {
802 		TNF_PROBE_0(tavor_ci_alloc_qp_invhca_fail,
803 		    TAVOR_TNF_ERROR, "");
804 		TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
805 		return (IBT_HCA_HDL_INVALID);
806 	}
807 
808 	/* Grab the Tavor softstate pointer */
809 	state = (tavor_state_t *)hca;
810 
811 	/* Allocate the QP */
812 	qpinfo.qpi_attrp	= attr_p;
813 	qpinfo.qpi_type		= type;
814 	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
815 	qpinfo.qpi_queueszp	= queue_sizes_p;
816 	qpinfo.qpi_qpn		= qpn;
817 	op.qpo_wq_loc		= state->ts_cfg_profile->cp_qp_wq_inddr;
818 	status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
819 	if (status != DDI_SUCCESS) {
820 		TNF_PROBE_1(tavor_ci_alloc_qp_fail, TAVOR_TNF_ERROR, "",
821 		    tnf_uint, status, status);
822 		TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
823 		return (status);
824 	}
825 
826 	/* Return the Tavor QP handle */
827 	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
828 
829 	TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
830 	return (IBT_SUCCESS);
831 }
832 
833 
834 /*
835  * tavor_ci_alloc_special_qp()
836  *    Allocate a Special Queue Pair
837  *    Context: Can be called only from user or kernel context.
838  */
839 static ibt_status_t
840 tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
841     ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
842     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
843     ibc_qp_hdl_t *qp_p)
844 {
845 	tavor_state_t		*state;
846 	tavor_qp_info_t		qpinfo;
847 	tavor_qp_options_t	op;
848 	int			status;
849 
850 	TAVOR_TNF_ENTER(tavor_ci_alloc_special_qp);
851 
852 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
853 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
854 
855 	/* Check for valid HCA handle */
856 	if (hca == NULL) {
857 		TNF_PROBE_0(tavor_ci_alloc_special_qp_invhca_fail,
858 		    TAVOR_TNF_ERROR, "");
859 		TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
860 		return (IBT_HCA_HDL_INVALID);
861 	}
862 
863 	/* Grab the Tavor softstate pointer */
864 	state = (tavor_state_t *)hca;
865 
866 	/* Allocate the Special QP */
867 	qpinfo.qpi_attrp	= attr_p;
868 	qpinfo.qpi_type		= type;
869 	qpinfo.qpi_port		= port;
870 	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
871 	qpinfo.qpi_queueszp	= queue_sizes_p;
872 	op.qpo_wq_loc		= state->ts_cfg_profile->cp_qp_wq_inddr;
873 	status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
874 	if (status != DDI_SUCCESS) {
875 		TNF_PROBE_1(tavor_ci_alloc_special_qp_fail, TAVOR_TNF_ERROR,
876 		    "", tnf_uint, status, status);
877 		TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
878 		return (status);
879 	}
880 
881 	/* Return the Tavor QP handle */
882 	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
883 
884 	TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
885 	return (IBT_SUCCESS);
886 }
887 
888 
889 /* ARGSUSED */
890 static ibt_status_t
891 tavor_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
892     ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
893     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
894     ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
895     ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
896 {
897 	return (IBT_NOT_SUPPORTED);
898 }
899 
900 /*
901  * tavor_ci_free_qp()
902  *    Free a Queue Pair
903  *    Context: Can be called only from user or kernel context.
904  */
905 static ibt_status_t
906 tavor_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
907     ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
908 {
909 	tavor_state_t	*state;
910 	tavor_qphdl_t	qphdl;
911 	int		status;
912 
913 	TAVOR_TNF_ENTER(tavor_ci_free_qp);
914 
915 	/* Check for valid HCA handle */
916 	if (hca == NULL) {
917 		TNF_PROBE_0(tavor_ci_free_qp_invhca_fail,
918 		    TAVOR_TNF_ERROR, "");
919 		TAVOR_TNF_EXIT(tavor_ci_free_qp);
920 		return (IBT_HCA_HDL_INVALID);
921 	}
922 
923 	/* Check for valid QP handle pointer */
924 	if (qp == NULL) {
925 		TNF_PROBE_0(tavor_ci_free_qp_invqphdl_fail,
926 		    TAVOR_TNF_ERROR, "");
927 		TAVOR_TNF_EXIT(tavor_ci_free_qp);
928 		return (IBT_QP_HDL_INVALID);
929 	}
930 
931 	/* Grab the Tavor softstate pointer and QP handle */
932 	state = (tavor_state_t *)hca;
933 	qphdl = (tavor_qphdl_t)qp;
934 
935 	/* Free the QP */
936 	status = tavor_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
937 	    TAVOR_NOSLEEP);
938 	if (status != DDI_SUCCESS) {
939 		TNF_PROBE_1(tavor_ci_free_qp_fail, TAVOR_TNF_ERROR, "",
940 		    tnf_uint, status, status);
941 		TAVOR_TNF_EXIT(tavor_ci_free_qp);
942 		return (status);
943 	}
944 
945 	TAVOR_TNF_EXIT(tavor_ci_free_qp);
946 	return (IBT_SUCCESS);
947 }
948 
949 
950 /*
951  * tavor_ci_release_qpn()
952  *    Release a Queue Pair Number (QPN)
953  *    Context: Can be called only from user or kernel context.
954  */
955 static ibt_status_t
956 tavor_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
957 {
958 	tavor_state_t		*state;
959 	tavor_qpn_entry_t	*entry;
960 
961 	TAVOR_TNF_ENTER(tavor_ci_release_qpn);
962 
963 	/* Check for valid HCA handle */
964 	if (hca == NULL) {
965 		TNF_PROBE_0(tavor_ci_release_qpn_invhca_fail,
966 		    TAVOR_TNF_ERROR, "");
967 		TAVOR_TNF_EXIT(tavor_ci_release_qpn);
968 		return (IBT_HCA_HDL_INVALID);
969 	}
970 
971 	/* Check for valid QP handle pointer */
972 	if (qpnh == NULL) {
973 		TNF_PROBE_0(tavor_ci_release_qpn_invqpnhdl_fail,
974 		    TAVOR_TNF_ERROR, "");
975 		TAVOR_TNF_EXIT(tavor_ci_release_qpn);
976 		return (IBT_QP_HDL_INVALID);
977 	}
978 
979 	/* Grab the Tavor softstate pointer and QP handle */
980 	state = (tavor_state_t *)hca;
981 	entry = (tavor_qpn_entry_t *)qpnh;
982 
983 	/* Release the QP number */
984 	tavor_qp_release_qpn(state, entry, TAVOR_QPN_RELEASE);
985 
986 	TAVOR_TNF_EXIT(tavor_ci_release_qpn);
987 	return (IBT_SUCCESS);
988 }
989 
990 
991 /*
992  * tavor_ci_query_qp()
993  *    Query a Queue Pair
994  *    Context: Can be called from interrupt or base context.
995  */
996 static ibt_status_t
997 tavor_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
998     ibt_qp_query_attr_t *attr_p)
999 {
1000 	tavor_state_t	*state;
1001 	tavor_qphdl_t	qphdl;
1002 	int		status;
1003 
1004 	TAVOR_TNF_ENTER(tavor_ci_query_qp);
1005 
1006 	/* Check for valid HCA handle */
1007 	if (hca == NULL) {
1008 		TNF_PROBE_0(tavor_ci_query_qp_invhca_fail,
1009 		    TAVOR_TNF_ERROR, "");
1010 		TAVOR_TNF_EXIT(tavor_ci_query_qp);
1011 		return (IBT_HCA_HDL_INVALID);
1012 	}
1013 
1014 	/* Check for valid QP handle */
1015 	if (qp == NULL) {
1016 		TNF_PROBE_0(tavor_ci_query_qp_invqphdl_fail,
1017 		    TAVOR_TNF_ERROR, "");
1018 		TAVOR_TNF_EXIT(tavor_ci_query_qp);
1019 		return (IBT_QP_HDL_INVALID);
1020 	}
1021 
1022 	/* Grab the Tavor softstate pointer and QP handle */
1023 	state = (tavor_state_t *)hca;
1024 	qphdl = (tavor_qphdl_t)qp;
1025 
1026 	/* Query the QP */
1027 	status = tavor_qp_query(state, qphdl, attr_p);
1028 	if (status != DDI_SUCCESS) {
1029 		TNF_PROBE_1(tavor_ci_query_qp_fail, TAVOR_TNF_ERROR, "",
1030 		    tnf_uint, status, status);
1031 		TAVOR_TNF_EXIT(tavor_ci_query_qp);
1032 		return (status);
1033 	}
1034 
1035 	TAVOR_TNF_EXIT(tavor_ci_query_qp);
1036 	return (IBT_SUCCESS);
1037 }
1038 
1039 
1040 /*
1041  * tavor_ci_modify_qp()
1042  *    Modify a Queue Pair
1043  *    Context: Can be called from interrupt or base context.
1044  */
1045 static ibt_status_t
1046 tavor_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
1047     ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
1048     ibt_queue_sizes_t *actual_sz)
1049 {
1050 	tavor_state_t	*state;
1051 	tavor_qphdl_t	qphdl;
1052 	int		status;
1053 
1054 	TAVOR_TNF_ENTER(tavor_ci_modify_qp);
1055 
1056 	/* Check for valid HCA handle */
1057 	if (hca == NULL) {
1058 		TNF_PROBE_0(tavor_ci_modify_qp_invhca_fail,
1059 		    TAVOR_TNF_ERROR, "");
1060 		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1061 		return (IBT_HCA_HDL_INVALID);
1062 	}
1063 
1064 	/* Check for valid QP handle */
1065 	if (qp == NULL) {
1066 		TNF_PROBE_0(tavor_ci_modify_qp_invqphdl_fail,
1067 		    TAVOR_TNF_ERROR, "");
1068 		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1069 		return (IBT_QP_HDL_INVALID);
1070 	}
1071 
1072 	/* Grab the Tavor softstate pointer and QP handle */
1073 	state = (tavor_state_t *)hca;
1074 	qphdl = (tavor_qphdl_t)qp;
1075 
1076 	/* Modify the QP */
1077 	status = tavor_qp_modify(state, qphdl, flags, info_p, actual_sz);
1078 	if (status != DDI_SUCCESS) {
1079 		TNF_PROBE_1(tavor_ci_modify_qp_fail, TAVOR_TNF_ERROR, "",
1080 		    tnf_uint, status, status);
1081 		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1082 		return (status);
1083 	}
1084 
1085 	TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1086 	return (IBT_SUCCESS);
1087 }
1088 
1089 
1090 /*
1091  * tavor_ci_alloc_cq()
1092  *    Allocate a Completion Queue
1093  *    Context: Can be called only from user or kernel context.
1094  */
1095 /* ARGSUSED */
1096 static ibt_status_t
1097 tavor_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
1098     ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
1099 {
1100 	tavor_state_t	*state;
1101 	tavor_cqhdl_t	cqhdl;
1102 	int		status;
1103 
1104 	TAVOR_TNF_ENTER(tavor_ci_alloc_cq);
1105 
1106 	/* Check for valid HCA handle */
1107 	if (hca == NULL) {
1108 		TNF_PROBE_0(tavor_ci_alloc_cq_invhca_fail,
1109 		    TAVOR_TNF_ERROR, "");
1110 		TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
1111 		return (IBT_HCA_HDL_INVALID);
1112 	}
1113 
1114 	/* Grab the Tavor softstate pointer */
1115 	state = (tavor_state_t *)hca;
1116 
1117 	/* Allocate the CQ */
1118 	status = tavor_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
1119 	    &cqhdl, TAVOR_NOSLEEP);
1120 	if (status != DDI_SUCCESS) {
1121 		TNF_PROBE_1(tavor_ci_alloc_cq_fail, TAVOR_TNF_ERROR, "",
1122 		    tnf_uint, status, status);
1123 		TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
1124 		return (status);
1125 	}
1126 
1127 	/* Return the Tavor CQ handle */
1128 	*cq_p = (ibc_cq_hdl_t)cqhdl;
1129 
1130 	TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
1131 	return (IBT_SUCCESS);
1132 }
1133 
1134 
1135 /*
1136  * tavor_ci_free_cq()
1137  *    Free a Completion Queue
1138  *    Context: Can be called only from user or kernel context.
1139  */
1140 static ibt_status_t
1141 tavor_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
1142 {
1143 	tavor_state_t	*state;
1144 	tavor_cqhdl_t	cqhdl;
1145 	int		status;
1146 
1147 	TAVOR_TNF_ENTER(tavor_ci_free_cq);
1148 
1149 
1150 	/* Check for valid HCA handle */
1151 	if (hca == NULL) {
1152 		TNF_PROBE_0(tavor_ci_free_cq_invhca_fail,
1153 		    TAVOR_TNF_ERROR, "");
1154 		TAVOR_TNF_EXIT(tavor_ci_free_cq);
1155 		return (IBT_HCA_HDL_INVALID);
1156 	}
1157 
1158 	/* Check for valid CQ handle pointer */
1159 	if (cq == NULL) {
1160 		TNF_PROBE_0(tavor_ci_free_cq_invcqhdl_fail,
1161 		    TAVOR_TNF_ERROR, "");
1162 		TAVOR_TNF_EXIT(tavor_ci_free_cq);
1163 		return (IBT_CQ_HDL_INVALID);
1164 	}
1165 
1166 	/* Grab the Tavor softstate pointer and CQ handle */
1167 	state = (tavor_state_t *)hca;
1168 	cqhdl = (tavor_cqhdl_t)cq;
1169 
1170 	/* Free the CQ */
1171 	status = tavor_cq_free(state, &cqhdl, TAVOR_NOSLEEP);
1172 	if (status != DDI_SUCCESS) {
1173 		TNF_PROBE_1(tavor_ci_free_cq_fail, TAVOR_TNF_ERROR, "",
1174 		    tnf_uint, status, status);
1175 		TAVOR_TNF_EXIT(tavor_ci_free_cq);
1176 		return (status);
1177 	}
1178 
1179 	TAVOR_TNF_EXIT(tavor_ci_free_cq);
1180 	return (IBT_SUCCESS);
1181 }
1182 
1183 
1184 /*
1185  * tavor_ci_query_cq()
1186  *    Return the size of a Completion Queue
1187  *    Context: Can be called only from user or kernel context.
1188  */
1189 static ibt_status_t
1190 tavor_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
1191     uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
1192 {
1193 	tavor_cqhdl_t	cqhdl;
1194 
1195 	TAVOR_TNF_ENTER(tavor_ci_query_cq);
1196 
1197 	/* Check for valid HCA handle */
1198 	if (hca == NULL) {
1199 		TNF_PROBE_0(tavor_ci_query_cq_invhca_fail,
1200 		    TAVOR_TNF_ERROR, "");
1201 		TAVOR_TNF_EXIT(tavor_ci_query_cq);
1202 		return (IBT_HCA_HDL_INVALID);
1203 	}
1204 
1205 	/* Check for valid CQ handle pointer */
1206 	if (cq == NULL) {
1207 		TNF_PROBE_0(tavor_ci_query_cq_invcqhdl,
1208 		    TAVOR_TNF_ERROR, "");
1209 		TAVOR_TNF_EXIT(tavor_ci_query_cq);
1210 		return (IBT_CQ_HDL_INVALID);
1211 	}
1212 
1213 	/* Grab the CQ handle */
1214 	cqhdl = (tavor_cqhdl_t)cq;
1215 
1216 	/* Query the current CQ size */
1217 	*entries_p = cqhdl->cq_bufsz;
1218 
1219 	/* interrupt moderation is not supported */
1220 	*count_p = 0;
1221 	*usec_p = 0;
1222 	*hid_p = 0;
1223 
1224 	TAVOR_TNF_EXIT(tavor_ci_query_cq);
1225 	return (IBT_SUCCESS);
1226 }
1227 
1228 
1229 /*
1230  * tavor_ci_resize_cq()
1231  *    Change the size of a Completion Queue
1232  *    Context: Can be called only from user or kernel context.
1233  */
1234 static ibt_status_t
1235 tavor_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
1236     uint_t *actual_size)
1237 {
1238 	tavor_state_t		*state;
1239 	tavor_cqhdl_t		cqhdl;
1240 	int			status;
1241 
1242 	TAVOR_TNF_ENTER(tavor_ci_resize_cq);
1243 
1244 	/* Check for valid HCA handle */
1245 	if (hca == NULL) {
1246 		TNF_PROBE_0(tavor_ci_resize_cq_invhca_fail,
1247 		    TAVOR_TNF_ERROR, "");
1248 		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1249 		return (IBT_HCA_HDL_INVALID);
1250 	}
1251 
1252 	/* Check for valid CQ handle pointer */
1253 	if (cq == NULL) {
1254 		TNF_PROBE_0(tavor_ci_resize_cq_invcqhdl_fail,
1255 		    TAVOR_TNF_ERROR, "");
1256 		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1257 		return (IBT_CQ_HDL_INVALID);
1258 	}
1259 
1260 	/* Grab the Tavor softstate pointer and CQ handle */
1261 	state = (tavor_state_t *)hca;
1262 	cqhdl = (tavor_cqhdl_t)cq;
1263 
1264 	/* Resize the CQ */
1265 	status = tavor_cq_resize(state, cqhdl, size, actual_size,
1266 	    TAVOR_NOSLEEP);
1267 	if (status != DDI_SUCCESS) {
1268 		TNF_PROBE_1(tavor_ci_resize_cq_fail, TAVOR_TNF_ERROR, "",
1269 		    tnf_uint, status, status);
1270 		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1271 		return (status);
1272 	}
1273 
1274 	TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1275 	return (IBT_SUCCESS);
1276 }
1277 
1278 /*
1279  * CQ interrupt moderation is not supported in tavor.
1280  */
1281 
1282 /* ARGSUSED */
static ibt_status_t
tavor_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq,
    uint_t count, uint_t usec, ibt_cq_handler_id_t hid)
{
	/* Tavor has no interrupt moderation controls; always reject */
	return (IBT_NOT_SUPPORTED);
}
1289 
1290 /*
1291  * tavor_ci_alloc_cq_sched()
1292  *    Reserve a CQ scheduling class resource
1293  *    Context: Can be called only from user or kernel context.
1294  */
1295 /* ARGSUSED */
1296 static ibt_status_t
1297 tavor_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_flags_t flags,
1298     ibc_cq_handler_attr_t *handler_attr_p)
1299 {
1300 	TAVOR_TNF_ENTER(tavor_ci_alloc_cq_sched);
1301 
1302 	if (hca == NULL) {
1303 		TNF_PROBE_0(tavor_ci_alloc_cq_sched_fail,
1304 		    TAVOR_TNF_ERROR, "");
1305 		TAVOR_TNF_EXIT(tavor_ci_alloc_cq_sched);
1306 		return (IBT_HCA_HDL_INVALID);
1307 	}
1308 
1309 	/*
1310 	 * This is an unsupported interface for the Tavor driver.  Tavor
1311 	 * does not support CQ scheduling classes.
1312 	 */
1313 
1314 	TAVOR_TNF_EXIT(tavor_ci_alloc_cq_sched);
1315 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p))
1316 	handler_attr_p->h_id = NULL;
1317 	handler_attr_p->h_pri = 0;
1318 	handler_attr_p->h_bind = NULL;
1319 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p))
1320 	return (IBT_SUCCESS);
1321 }
1322 
1323 
1324 /*
1325  * tavor_ci_free_cq_sched()
1326  *    Free a CQ scheduling class resource
1327  *    Context: Can be called only from user or kernel context.
1328  */
static ibt_status_t
tavor_ci_free_cq_sched(ibc_hca_hdl_t hca, ibt_cq_handler_id_t handler_id)
{
	TAVOR_TNF_ENTER(tavor_ci_free_cq_sched);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_cq_sched_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_cq_sched);
		return (IBT_HCA_HDL_INVALID);
	}

	/*
	 * This is an unsupported interface for the Tavor driver.  Tavor
	 * does not support CQ scheduling classes.  Returning a NULL
	 * hint is the way to treat this as unsupported.  We check for
	 * the expected NULL, but do not fail in any case.
	 */
	/* Trace an unexpected non-NULL id for debugging; still succeed */
	if (handler_id != NULL) {
		TNF_PROBE_1(tavor_ci_free_cq_sched, TAVOR_TNF_TRACE, "",
		    tnf_opaque, handler_id, handler_id);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_cq_sched);
	return (IBT_SUCCESS);
}
1355 
1356 
1357 /*
1358  * tavor_ci_alloc_eec()
1359  *    Allocate an End-to-End context
1360  *    Context: Can be called only from user or kernel context.
1361  */
1362 /* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
    ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
{
	TAVOR_TNF_ENTER(tavor_ci_alloc_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */
	/* All arguments are ignored; no EEC handle is ever produced */

	TAVOR_TNF_EXIT(tavor_ci_alloc_eec);
	return (IBT_NOT_SUPPORTED);
}
1378 
1379 
1380 /*
1381  * tavor_ci_free_eec()
1382  *    Free an End-to-End context
1383  *    Context: Can be called only from user or kernel context.
1384  */
1385 /* ARGSUSED */
static ibt_status_t
tavor_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
{
	TAVOR_TNF_ENTER(tavor_ci_free_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */
	/* Both arguments are ignored; the call is rejected outright */

	TAVOR_TNF_EXIT(tavor_ci_free_eec);
	return (IBT_NOT_SUPPORTED);
}
1400 
1401 
1402 /*
1403  * tavor_ci_query_eec()
1404  *    Query an End-to-End context
1405  *    Context: Can be called from interrupt or base context.
1406  */
1407 /* ARGSUSED */
static ibt_status_t
tavor_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_eec_query_attr_t *attr_p)
{
	TAVOR_TNF_ENTER(tavor_ci_query_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */
	/* All arguments are ignored; attr_p is never written */

	TAVOR_TNF_EXIT(tavor_ci_query_eec);
	return (IBT_NOT_SUPPORTED);
}
1423 
1424 
1425 /*
1426  * tavor_ci_modify_eec()
1427  *    Modify an End-to-End context
1428  *    Context: Can be called from interrupt or base context.
1429  */
1430 /* ARGSUSED */
1431 static ibt_status_t
1432 tavor_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1433     ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
1434 {
1435 	TAVOR_TNF_ENTER(tavor_ci_query_eec);
1436 
1437 	/*
1438 	 * This is an unsupported interface for the Tavor driver.  This
1439 	 * interface is necessary to support Reliable Datagram (RD)
1440 	 * operations.  Tavor does not support RD.
1441 	 */
1442 
1443 	TAVOR_TNF_EXIT(tavor_ci_query_eec);
1444 	return (IBT_NOT_SUPPORTED);
1445 }
1446 
1447 
1448 /*
1449  * tavor_ci_register_mr()
1450  *    Prepare a virtually addressed Memory Region for use by an HCA
1451  *    Context: Can be called from interrupt or base context.
1452  */
1453 /* ARGSUSED */
1454 static ibt_status_t
1455 tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1456     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1457     ibt_mr_desc_t *mr_desc)
1458 {
1459 	tavor_mr_options_t	op;
1460 	tavor_state_t		*state;
1461 	tavor_pdhdl_t		pdhdl;
1462 	tavor_mrhdl_t		mrhdl;
1463 	int			status;
1464 
1465 	TAVOR_TNF_ENTER(tavor_ci_register_mr);
1466 
1467 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1468 
1469 	ASSERT(mr_attr != NULL);
1470 	ASSERT(mr_p != NULL);
1471 	ASSERT(mr_desc != NULL);
1472 
1473 	/* Check for valid HCA handle */
1474 	if (hca == NULL) {
1475 		TNF_PROBE_0(tavor_ci_register_mr_invhca_fail,
1476 		    TAVOR_TNF_ERROR, "");
1477 		TAVOR_TNF_EXIT(tavor_ci_register_mr);
1478 		return (IBT_HCA_HDL_INVALID);
1479 	}
1480 
1481 	/* Check for valid PD handle pointer */
1482 	if (pd == NULL) {
1483 		TNF_PROBE_0(tavor_ci_register_mr_invpdhdl_fail,
1484 		    TAVOR_TNF_ERROR, "");
1485 		TAVOR_TNF_EXIT(tavor_ci_register_mr);
1486 		return (IBT_PD_HDL_INVALID);
1487 	}
1488 
1489 	/*
1490 	 * Validate the access flags.  Both Remote Write and Remote Atomic
1491 	 * require the Local Write flag to be set
1492 	 */
1493 	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1494 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1495 	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1496 		TNF_PROBE_0(tavor_ci_register_mr_inv_accflags_fail,
1497 		    TAVOR_TNF_ERROR, "");
1498 		TAVOR_TNF_EXIT(tavor_ci_register_mr);
1499 		return (IBT_MR_ACCESS_REQ_INVALID);
1500 	}
1501 
1502 	/* Grab the Tavor softstate pointer and PD handle */
1503 	state = (tavor_state_t *)hca;
1504 	pdhdl = (tavor_pdhdl_t)pd;
1505 
1506 	/* Register the memory region */
1507 	op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
1508 	op.mro_bind_dmahdl = NULL;
1509 	op.mro_bind_override_addr = 0;
1510 	status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
1511 	if (status != DDI_SUCCESS) {
1512 		TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1513 		    tnf_uint, status, status);
1514 		TAVOR_TNF_EXIT(tavor_ci_register_mr);
1515 		return (status);
1516 	}
1517 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1518 
1519 	/* Fill in the mr_desc structure */
1520 	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1521 	mr_desc->md_lkey  = mrhdl->mr_lkey;
1522 	/* Only set RKey if remote access was requested */
1523 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1524 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1525 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1526 		mr_desc->md_rkey = mrhdl->mr_rkey;
1527 	}
1528 
1529 	/*
1530 	 * If region is mapped for streaming (i.e. noncoherent), then set
1531 	 * sync is required
1532 	 */
1533 	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1534 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1535 
1536 	/* Return the Tavor MR handle */
1537 	*mr_p = (ibc_mr_hdl_t)mrhdl;
1538 
1539 	TAVOR_TNF_EXIT(tavor_ci_register_mr);
1540 	return (IBT_SUCCESS);
1541 }
1542 
1543 
1544 /*
1545  * tavor_ci_register_buf()
1546  *    Prepare a Memory Region specified by buf structure for use by an HCA
1547  *    Context: Can be called from interrupt or base context.
1548  */
1549 /* ARGSUSED */
1550 static ibt_status_t
1551 tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1552     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1553     ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1554 {
1555 	tavor_mr_options_t	op;
1556 	tavor_state_t		*state;
1557 	tavor_pdhdl_t		pdhdl;
1558 	tavor_mrhdl_t		mrhdl;
1559 	int			status;
1560 	ibt_mr_flags_t		flags = attrp->mr_flags;
1561 
1562 	TAVOR_TNF_ENTER(tavor_ci_register_buf);
1563 
1564 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1565 
1566 	ASSERT(mr_p != NULL);
1567 	ASSERT(mr_desc != NULL);
1568 
1569 	/* Check for valid HCA handle */
1570 	if (hca == NULL) {
1571 		TNF_PROBE_0(tavor_ci_register_buf_invhca_fail,
1572 		    TAVOR_TNF_ERROR, "");
1573 		TAVOR_TNF_EXIT(tavor_ci_register_buf);
1574 		return (IBT_HCA_HDL_INVALID);
1575 	}
1576 
1577 	/* Check for valid PD handle pointer */
1578 	if (pd == NULL) {
1579 		TNF_PROBE_0(tavor_ci_register_buf_invpdhdl_fail,
1580 		    TAVOR_TNF_ERROR, "");
1581 		TAVOR_TNF_EXIT(tavor_ci_register_buf);
1582 		return (IBT_PD_HDL_INVALID);
1583 	}
1584 
1585 	/*
1586 	 * Validate the access flags.  Both Remote Write and Remote Atomic
1587 	 * require the Local Write flag to be set
1588 	 */
1589 	if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1590 	    (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1591 	    !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1592 		TNF_PROBE_0(tavor_ci_register_buf_accflags_inv,
1593 		    TAVOR_TNF_ERROR, "");
1594 		TAVOR_TNF_EXIT(tavor_ci_register_buf);
1595 		return (IBT_MR_ACCESS_REQ_INVALID);
1596 	}
1597 
1598 	/* Grab the Tavor softstate pointer and PD handle */
1599 	state = (tavor_state_t *)hca;
1600 	pdhdl = (tavor_pdhdl_t)pd;
1601 
1602 	/* Register the memory region */
1603 	op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
1604 	op.mro_bind_dmahdl = NULL;
1605 	op.mro_bind_override_addr = 0;
1606 	status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op);
1607 	if (status != DDI_SUCCESS) {
1608 		TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1609 		    tnf_uint, status, status);
1610 		TAVOR_TNF_EXIT(tavor_ci_register_mr);
1611 		return (status);
1612 	}
1613 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1614 
1615 	/* Fill in the mr_desc structure */
1616 	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1617 	mr_desc->md_lkey  = mrhdl->mr_lkey;
1618 	/* Only set RKey if remote access was requested */
1619 	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1620 	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1621 	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1622 		mr_desc->md_rkey = mrhdl->mr_rkey;
1623 	}
1624 
1625 	/*
1626 	 * If region is mapped for streaming (i.e. noncoherent), then set
1627 	 * sync is required
1628 	 */
1629 	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1630 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1631 
1632 	/* Return the Tavor MR handle */
1633 	*mr_p = (ibc_mr_hdl_t)mrhdl;
1634 
1635 	TAVOR_TNF_EXIT(tavor_ci_register_buf);
1636 	return (IBT_SUCCESS);
1637 }
1638 
1639 
1640 /*
1641  * tavor_ci_deregister_mr()
1642  *    Deregister a Memory Region from an HCA translation table
1643  *    Context: Can be called only from user or kernel context.
1644  */
1645 static ibt_status_t
1646 tavor_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
1647 {
1648 	tavor_state_t		*state;
1649 	tavor_mrhdl_t		mrhdl;
1650 	int			status;
1651 
1652 	TAVOR_TNF_ENTER(tavor_ci_deregister_mr);
1653 
1654 	/* Check for valid HCA handle */
1655 	if (hca == NULL) {
1656 		TNF_PROBE_0(tavor_ci_deregister_mr_invhca_fail,
1657 		    TAVOR_TNF_ERROR, "");
1658 		TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1659 		return (IBT_HCA_HDL_INVALID);
1660 	}
1661 
1662 	/* Check for valid memory region handle */
1663 	if (mr == NULL) {
1664 		TNF_PROBE_0(tavor_ci_deregister_mr_invmrhdl_fail,
1665 		    TAVOR_TNF_ERROR, "");
1666 		TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1667 		return (IBT_MR_HDL_INVALID);
1668 	}
1669 
1670 	/* Grab the Tavor softstate pointer */
1671 	state = (tavor_state_t *)hca;
1672 	mrhdl = (tavor_mrhdl_t)mr;
1673 
1674 	/*
1675 	 * Deregister the memory region.
1676 	 */
1677 	status = tavor_mr_deregister(state, &mrhdl, TAVOR_MR_DEREG_ALL,
1678 	    TAVOR_NOSLEEP);
1679 	if (status != DDI_SUCCESS) {
1680 		TNF_PROBE_1(tavor_ci_deregister_mr_fail,
1681 		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1682 		TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1683 		return (status);
1684 	}
1685 
1686 	TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1687 	return (IBT_SUCCESS);
1688 }
1689 
1690 
1691 /*
1692  * tavor_ci_query_mr()
1693  *    Retrieve information about a specified Memory Region
1694  *    Context: Can be called from interrupt or base context.
1695  */
1696 static ibt_status_t
1697 tavor_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1698     ibt_mr_query_attr_t *mr_attr)
1699 {
1700 	tavor_state_t		*state;
1701 	tavor_mrhdl_t		mrhdl;
1702 	int			status;
1703 
1704 	TAVOR_TNF_ENTER(tavor_ci_query_mr);
1705 
1706 	ASSERT(mr_attr != NULL);
1707 
1708 	/* Check for valid HCA handle */
1709 	if (hca == NULL) {
1710 		TNF_PROBE_0(tavor_ci_query_mr_invhca_fail,
1711 		    TAVOR_TNF_ERROR, "");
1712 		TAVOR_TNF_EXIT(tavor_ci_query_mr);
1713 		return (IBT_HCA_HDL_INVALID);
1714 	}
1715 
1716 	/* Check for MemRegion handle */
1717 	if (mr == NULL) {
1718 		TNF_PROBE_0(tavor_ci_query_mr_invmrhdl_fail,
1719 		    TAVOR_TNF_ERROR, "");
1720 		TAVOR_TNF_EXIT(tavor_ci_query_mr);
1721 		return (IBT_MR_HDL_INVALID);
1722 	}
1723 
1724 	/* Grab the Tavor softstate pointer and MR handle */
1725 	state = (tavor_state_t *)hca;
1726 	mrhdl = (tavor_mrhdl_t)mr;
1727 
1728 	/* Query the memory region */
1729 	status = tavor_mr_query(state, mrhdl, mr_attr);
1730 	if (status != DDI_SUCCESS) {
1731 		TNF_PROBE_1(tavor_ci_query_mr_fail, TAVOR_TNF_ERROR, "",
1732 		    tnf_uint, status, status);
1733 		TAVOR_TNF_EXIT(tavor_ci_query_mr);
1734 		return (status);
1735 	}
1736 
1737 	TAVOR_TNF_EXIT(tavor_ci_query_mr);
1738 	return (IBT_SUCCESS);
1739 }
1740 
1741 
1742 /*
1743  * tavor_ci_register_shared_mr()
1744  *    Create a shared memory region matching an existing Memory Region
1745  *    Context: Can be called from interrupt or base context.
1746  */
1747 /* ARGSUSED */
1748 static ibt_status_t
1749 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1750     ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1751     ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1752 {
1753 	tavor_state_t		*state;
1754 	tavor_pdhdl_t		pdhdl;
1755 	tavor_mrhdl_t		mrhdl, mrhdl_new;
1756 	int			status;
1757 
1758 	TAVOR_TNF_ENTER(tavor_ci_register_shared_mr);
1759 
1760 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1761 
1762 	ASSERT(mr_attr != NULL);
1763 	ASSERT(mr_p != NULL);
1764 	ASSERT(mr_desc != NULL);
1765 
1766 	/* Check for valid HCA handle */
1767 	if (hca == NULL) {
1768 		TNF_PROBE_0(tavor_ci_register_shared_mr_invhca_fail,
1769 		    TAVOR_TNF_ERROR, "");
1770 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1771 		return (IBT_HCA_HDL_INVALID);
1772 	}
1773 
1774 	/* Check for valid PD handle pointer */
1775 	if (pd == NULL) {
1776 		TNF_PROBE_0(tavor_ci_register_shared_mr_invpdhdl_fail,
1777 		    TAVOR_TNF_ERROR, "");
1778 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1779 		return (IBT_PD_HDL_INVALID);
1780 	}
1781 
1782 	/* Check for valid memory region handle */
1783 	if (mr == NULL) {
1784 		TNF_PROBE_0(tavor_ci_register_shared_mr_invmrhdl_fail,
1785 		    TAVOR_TNF_ERROR, "");
1786 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1787 		return (IBT_MR_HDL_INVALID);
1788 	}
1789 	/*
1790 	 * Validate the access flags.  Both Remote Write and Remote Atomic
1791 	 * require the Local Write flag to be set
1792 	 */
1793 	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1794 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1795 	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1796 		TNF_PROBE_0(tavor_ci_register_shared_mr_accflags_inv,
1797 		    TAVOR_TNF_ERROR, "");
1798 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1799 		return (IBT_MR_ACCESS_REQ_INVALID);
1800 	}
1801 
1802 	/* Grab the Tavor softstate pointer and handles */
1803 	state = (tavor_state_t *)hca;
1804 	pdhdl = (tavor_pdhdl_t)pd;
1805 	mrhdl = (tavor_mrhdl_t)mr;
1806 
1807 	/* Register the shared memory region */
1808 	status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1809 	    &mrhdl_new);
1810 	if (status != DDI_SUCCESS) {
1811 		TNF_PROBE_1(tavor_ci_register_shared_mr_fail, TAVOR_TNF_ERROR,
1812 		    "", tnf_uint, status, status);
1813 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1814 		return (status);
1815 	}
1816 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1817 
1818 	/* Fill in the mr_desc structure */
1819 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1820 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1821 	/* Only set RKey if remote access was requested */
1822 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1823 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1824 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1825 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
1826 	}
1827 
1828 	/*
1829 	 * If shared region is mapped for streaming (i.e. noncoherent), then
1830 	 * set sync is required
1831 	 */
1832 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1833 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1834 
1835 	/* Return the Tavor MR handle */
1836 	*mr_p = (ibc_mr_hdl_t)mrhdl_new;
1837 
1838 	TAVOR_TNF_EXIT(tavor_ci_register_mr);
1839 	return (IBT_SUCCESS);
1840 }
1841 
1842 
1843 /*
1844  * tavor_ci_reregister_mr()
1845  *    Modify the attributes of an existing Memory Region
1846  *    Context: Can be called from interrupt or base context.
1847  */
1848 /* ARGSUSED */
1849 static ibt_status_t
1850 tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1851     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1852     ibt_mr_desc_t *mr_desc)
1853 {
1854 	tavor_mr_options_t	op;
1855 	tavor_state_t		*state;
1856 	tavor_pdhdl_t		pdhdl;
1857 	tavor_mrhdl_t		mrhdl, mrhdl_new;
1858 	int			status;
1859 
1860 	TAVOR_TNF_ENTER(tavor_ci_reregister_mr);
1861 
1862 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1863 
1864 	ASSERT(mr_attr != NULL);
1865 	ASSERT(mr_new != NULL);
1866 	ASSERT(mr_desc != NULL);
1867 
1868 	/* Check for valid HCA handle */
1869 	if (hca == NULL) {
1870 		TNF_PROBE_0(tavor_ci_reregister_mr_hca_inv, TAVOR_TNF_ERROR,
1871 		    "");
1872 		TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1873 		return (IBT_HCA_HDL_INVALID);
1874 	}
1875 
1876 	/* Check for valid memory region handle */
1877 	if (mr == NULL) {
1878 		TNF_PROBE_0(tavor_ci_reregister_mr_invmrhdl_fail,
1879 		    TAVOR_TNF_ERROR, "");
1880 		TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1881 		return (IBT_MR_HDL_INVALID);
1882 	}
1883 
1884 	/* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1885 	state = (tavor_state_t *)hca;
1886 	mrhdl = (tavor_mrhdl_t)mr;
1887 	pdhdl = (tavor_pdhdl_t)pd;
1888 
1889 	/* Reregister the memory region */
1890 	op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1891 	status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1892 	    &mrhdl_new, &op);
1893 	if (status != DDI_SUCCESS) {
1894 		TNF_PROBE_1(tavor_ci_reregister_mr_fail, TAVOR_TNF_ERROR, "",
1895 		    tnf_uint, status, status);
1896 		TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1897 		return (status);
1898 	}
1899 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1900 
1901 	/* Fill in the mr_desc structure */
1902 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1903 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1904 	/* Only set RKey if remote access was requested */
1905 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1906 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1907 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1908 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
1909 	}
1910 
1911 	/*
1912 	 * If region is mapped for streaming (i.e. noncoherent), then set
1913 	 * sync is required
1914 	 */
1915 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1916 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1917 
1918 	/* Return the Tavor MR handle */
1919 	*mr_new = (ibc_mr_hdl_t)mrhdl_new;
1920 
1921 	TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1922 	return (IBT_SUCCESS);
1923 }
1924 
1925 
1926 /*
1927  * tavor_ci_reregister_buf()
1928  *    Modify the attributes of an existing Memory Region
1929  *    Context: Can be called from interrupt or base context.
1930  */
/* ARGSUSED */
static ibt_status_t
tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
{
	tavor_mr_options_t	op;
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	tavor_mrhdl_t		mrhdl, mrhdl_new;
	int			status;
	/*
	 * NOTE(review): attrp is dereferenced here, before the HCA/MR handle
	 * validation below; the IBTF caller presumably guarantees a non-NULL
	 * attrp — confirm against the ibc_reregister_buf contract.
	 */
	ibt_mr_flags_t		flags = attrp->mr_flags;

	TAVOR_TNF_ENTER(tavor_ci_reregister_buf);

	/* Lint annotation: *mr_desc is private until returned to the caller */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_reregister_buf_hca_inv, TAVOR_TNF_ERROR,
		    "");
		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		TNF_PROBE_0(tavor_ci_reregister_buf_invmrhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
	state = (tavor_state_t *)hca;
	mrhdl = (tavor_mrhdl_t)mr;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Reregister the memory region (bind type from the config profile) */
	op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
	status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_reregister_buf_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Tavor MR handle */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
	return (IBT_SUCCESS);
}
2007 
2008 /*
2009  * tavor_ci_sync_mr()
2010  *    Synchronize access to a Memory Region
2011  *    Context: Can be called from interrupt or base context.
2012  */
2013 static ibt_status_t
2014 tavor_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
2015 {
2016 	tavor_state_t		*state;
2017 	int			status;
2018 
2019 	TAVOR_TNF_ENTER(tavor_ci_sync_mr);
2020 
2021 	ASSERT(mr_segs != NULL);
2022 
2023 	/* Check for valid HCA handle */
2024 	if (hca == NULL) {
2025 		TNF_PROBE_0(tavor_ci_sync_mr_invhca_fail,
2026 		    TAVOR_TNF_ERROR, "");
2027 		TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2028 		return (IBT_HCA_HDL_INVALID);
2029 	}
2030 
2031 	/* Grab the Tavor softstate pointer */
2032 	state = (tavor_state_t *)hca;
2033 
2034 	/* Sync the memory region */
2035 	status = tavor_mr_sync(state, mr_segs, num_segs);
2036 	if (status != DDI_SUCCESS) {
2037 		TNF_PROBE_1(tavor_ci_sync_mr_fail, TAVOR_TNF_ERROR, "",
2038 		    tnf_uint, status, status);
2039 		TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2040 		return (status);
2041 	}
2042 
2043 	TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2044 	return (IBT_SUCCESS);
2045 }
2046 
2047 
2048 /*
2049  * tavor_ci_alloc_mw()
2050  *    Allocate a Memory Window
2051  *    Context: Can be called from interrupt or base context.
2052  */
2053 static ibt_status_t
2054 tavor_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
2055     ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
2056 {
2057 	tavor_state_t		*state;
2058 	tavor_pdhdl_t		pdhdl;
2059 	tavor_mwhdl_t		mwhdl;
2060 	int			status;
2061 
2062 	TAVOR_TNF_ENTER(tavor_ci_alloc_mw);
2063 
2064 	ASSERT(mw_p != NULL);
2065 	ASSERT(rkey_p != NULL);
2066 
2067 	/* Check for valid HCA handle */
2068 	if (hca == NULL) {
2069 		TNF_PROBE_0(tavor_ci_alloc_mw_invhca_fail,
2070 		    TAVOR_TNF_ERROR, "");
2071 		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2072 		return (IBT_HCA_HDL_INVALID);
2073 	}
2074 
2075 	/* Check for valid PD handle pointer */
2076 	if (pd == NULL) {
2077 		TNF_PROBE_0(tavor_ci_alloc_mw_invpdhdl_fail,
2078 		    TAVOR_TNF_ERROR, "");
2079 		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2080 		return (IBT_PD_HDL_INVALID);
2081 	}
2082 
2083 	/* Grab the Tavor softstate pointer and PD handle */
2084 	state = (tavor_state_t *)hca;
2085 	pdhdl = (tavor_pdhdl_t)pd;
2086 
2087 	/* Allocate the memory window */
2088 	status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
2089 	if (status != DDI_SUCCESS) {
2090 		TNF_PROBE_1(tavor_ci_alloc_mw_fail, TAVOR_TNF_ERROR, "",
2091 		    tnf_uint, status, status);
2092 		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2093 		return (status);
2094 	}
2095 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
2096 
2097 	/* Return the MW handle and RKey */
2098 	*mw_p = (ibc_mw_hdl_t)mwhdl;
2099 	*rkey_p = mwhdl->mr_rkey;
2100 
2101 	TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2102 	return (IBT_SUCCESS);
2103 }
2104 
2105 
2106 /*
2107  * tavor_ci_free_mw()
2108  *    Free a Memory Window
2109  *    Context: Can be called from interrupt or base context.
2110  */
2111 static ibt_status_t
2112 tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
2113 {
2114 	tavor_state_t		*state;
2115 	tavor_mwhdl_t		mwhdl;
2116 	int			status;
2117 
2118 	TAVOR_TNF_ENTER(tavor_ci_free_mw);
2119 
2120 	/* Check for valid HCA handle */
2121 	if (hca == NULL) {
2122 		TNF_PROBE_0(tavor_ci_free_mw_invhca_fail,
2123 		    TAVOR_TNF_ERROR, "");
2124 		TAVOR_TNF_EXIT(tavor_ci_free_mw);
2125 		return (IBT_HCA_HDL_INVALID);
2126 	}
2127 
2128 	/* Check for valid MW handle */
2129 	if (mw == NULL) {
2130 		TNF_PROBE_0(tavor_ci_free_mw_invmwhdl_fail,
2131 		    TAVOR_TNF_ERROR, "");
2132 		TAVOR_TNF_EXIT(tavor_ci_free_mw);
2133 		return (IBT_MW_HDL_INVALID);
2134 	}
2135 
2136 	/* Grab the Tavor softstate pointer and MW handle */
2137 	state = (tavor_state_t *)hca;
2138 	mwhdl = (tavor_mwhdl_t)mw;
2139 
2140 	/* Free the memory window */
2141 	status = tavor_mw_free(state, &mwhdl, TAVOR_NOSLEEP);
2142 	if (status != DDI_SUCCESS) {
2143 		TNF_PROBE_1(tavor_ci_free_mw_fail, TAVOR_TNF_ERROR, "",
2144 		    tnf_uint, status, status);
2145 		TAVOR_TNF_EXIT(tavor_ci_free_mw);
2146 		return (status);
2147 	}
2148 
2149 	TAVOR_TNF_EXIT(tavor_ci_free_mw);
2150 	return (IBT_SUCCESS);
2151 }
2152 
2153 
2154 /*
2155  * tavor_ci_query_mw()
2156  *    Return the attributes of the specified Memory Window
2157  *    Context: Can be called from interrupt or base context.
2158  */
2159 static ibt_status_t
2160 tavor_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
2161     ibt_mw_query_attr_t *mw_attr_p)
2162 {
2163 	tavor_mwhdl_t		mwhdl;
2164 
2165 	TAVOR_TNF_ENTER(tavor_ci_query_mw);
2166 
2167 	ASSERT(mw_attr_p != NULL);
2168 
2169 	/* Check for valid HCA handle */
2170 	if (hca == NULL) {
2171 		TNF_PROBE_0(tavor_ci_query_mw_invhca_fail,
2172 		    TAVOR_TNF_ERROR, "");
2173 		TAVOR_TNF_EXIT(tavor_ci_query_mw);
2174 		return (IBT_HCA_HDL_INVALID);
2175 	}
2176 
2177 	/* Check for valid MemWin handle */
2178 	if (mw == NULL) {
2179 		TNF_PROBE_0(tavor_ci_query_mw_inc_mwhdl_fail,
2180 		    TAVOR_TNF_ERROR, "");
2181 		TAVOR_TNF_EXIT(tavor_ci_query_mw);
2182 		return (IBT_MW_HDL_INVALID);
2183 	}
2184 
2185 	/* Query the memory window pointer and fill in the return values */
2186 	mwhdl = (tavor_mwhdl_t)mw;
2187 	mutex_enter(&mwhdl->mr_lock);
2188 	mw_attr_p->mw_pd   = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
2189 	mw_attr_p->mw_rkey = mwhdl->mr_rkey;
2190 	mutex_exit(&mwhdl->mr_lock);
2191 
2192 	TAVOR_TNF_EXIT(tavor_ci_query_mw);
2193 	return (IBT_SUCCESS);
2194 }
2195 
2196 
2197 /*
2198  * tavor_ci_attach_mcg()
2199  *    Attach a Queue Pair to a Multicast Group
2200  *    Context: Can be called only from user or kernel context.
2201  */
2202 static ibt_status_t
2203 tavor_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
2204     ib_lid_t lid)
2205 {
2206 	tavor_state_t		*state;
2207 	tavor_qphdl_t		qphdl;
2208 	int			status;
2209 
2210 	TAVOR_TNF_ENTER(tavor_ci_attach_mcg);
2211 
2212 	/* Check for valid HCA handle */
2213 	if (hca == NULL) {
2214 		TNF_PROBE_0(tavor_ci_attach_mcg_invhca_fail,
2215 		    TAVOR_TNF_ERROR, "");
2216 		TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2217 		return (IBT_HCA_HDL_INVALID);
2218 	}
2219 
2220 	/* Check for valid QP handle pointer */
2221 	if (qp == NULL) {
2222 		TNF_PROBE_0(tavor_ci_attach_mcg_invqphdl_fail,
2223 		    TAVOR_TNF_ERROR, "");
2224 		TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2225 		return (IBT_QP_HDL_INVALID);
2226 	}
2227 
2228 	/* Grab the Tavor softstate pointer and QP handles */
2229 	state = (tavor_state_t *)hca;
2230 	qphdl = (tavor_qphdl_t)qp;
2231 
2232 	/* Attach the QP to the multicast group */
2233 	status = tavor_mcg_attach(state, qphdl, gid, lid);
2234 	if (status != DDI_SUCCESS) {
2235 		TNF_PROBE_1(tavor_ci_attach_mcg_fail, TAVOR_TNF_ERROR, "",
2236 		    tnf_uint, status, status);
2237 		TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2238 		return (status);
2239 	}
2240 
2241 	TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2242 	return (IBT_SUCCESS);
2243 }
2244 
2245 
2246 /*
2247  * tavor_ci_detach_mcg()
2248  *    Detach a Queue Pair to a Multicast Group
2249  *    Context: Can be called only from user or kernel context.
2250  */
2251 static ibt_status_t
2252 tavor_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
2253     ib_lid_t lid)
2254 {
2255 	tavor_state_t		*state;
2256 	tavor_qphdl_t		qphdl;
2257 	int			status;
2258 
2259 	TAVOR_TNF_ENTER(tavor_ci_attach_mcg);
2260 
2261 	/* Check for valid HCA handle */
2262 	if (hca == NULL) {
2263 		TNF_PROBE_0(tavor_ci_detach_mcg_invhca_fail,
2264 		    TAVOR_TNF_ERROR, "");
2265 		TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2266 		return (IBT_HCA_HDL_INVALID);
2267 	}
2268 
2269 	/* Check for valid QP handle pointer */
2270 	if (qp == NULL) {
2271 		TNF_PROBE_0(tavor_ci_detach_mcg_invqphdl_fail,
2272 		    TAVOR_TNF_ERROR, "");
2273 		TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2274 		return (IBT_QP_HDL_INVALID);
2275 	}
2276 
2277 	/* Grab the Tavor softstate pointer and QP handle */
2278 	state = (tavor_state_t *)hca;
2279 	qphdl = (tavor_qphdl_t)qp;
2280 
2281 	/* Detach the QP from the multicast group */
2282 	status = tavor_mcg_detach(state, qphdl, gid, lid);
2283 	if (status != DDI_SUCCESS) {
2284 		TNF_PROBE_1(tavor_ci_detach_mcg_fail, TAVOR_TNF_ERROR, "",
2285 		    tnf_uint, status, status);
2286 		TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2287 		return (status);
2288 	}
2289 
2290 	TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2291 	return (IBT_SUCCESS);
2292 }
2293 
2294 
2295 /*
2296  * tavor_ci_post_send()
2297  *    Post send work requests to the send queue on the specified QP
2298  *    Context: Can be called from interrupt or base context.
2299  */
2300 static ibt_status_t
2301 tavor_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
2302     uint_t num_wr, uint_t *num_posted_p)
2303 {
2304 	tavor_state_t		*state;
2305 	tavor_qphdl_t		qphdl;
2306 	int			status;
2307 
2308 	TAVOR_TNF_ENTER(tavor_ci_post_send);
2309 
2310 	ASSERT(wr_p != NULL);
2311 	ASSERT(num_wr != 0);
2312 
2313 	/* Check for valid HCA handle */
2314 	if (hca == NULL) {
2315 		TNF_PROBE_0(tavor_ci_post_send_invhca_fail,
2316 		    TAVOR_TNF_ERROR, "");
2317 		TAVOR_TNF_EXIT(tavor_ci_post_send);
2318 		return (IBT_HCA_HDL_INVALID);
2319 	}
2320 
2321 	/* Check for valid QP handle pointer */
2322 	if (qp == NULL) {
2323 		TNF_PROBE_0(tavor_ci_post_send_invqphdl_fail,
2324 		    TAVOR_TNF_ERROR, "");
2325 		TAVOR_TNF_EXIT(tavor_ci_post_send);
2326 		return (IBT_QP_HDL_INVALID);
2327 	}
2328 
2329 	/* Grab the Tavor softstate pointer and QP handle */
2330 	state = (tavor_state_t *)hca;
2331 	qphdl = (tavor_qphdl_t)qp;
2332 
2333 	/* Post the send WQEs */
2334 	status = tavor_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
2335 	if (status != DDI_SUCCESS) {
2336 		TNF_PROBE_1(tavor_ci_post_send_fail, TAVOR_TNF_ERROR, "",
2337 		    tnf_uint, status, status);
2338 		TAVOR_TNF_EXIT(tavor_ci_post_send);
2339 		return (status);
2340 	}
2341 
2342 	TAVOR_TNF_EXIT(tavor_ci_post_send);
2343 	return (IBT_SUCCESS);
2344 }
2345 
2346 
2347 /*
2348  * tavor_ci_post_recv()
2349  *    Post receive work requests to the receive queue on the specified QP
2350  *    Context: Can be called from interrupt or base context.
2351  */
2352 static ibt_status_t
2353 tavor_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
2354     uint_t num_wr, uint_t *num_posted_p)
2355 {
2356 	tavor_state_t		*state;
2357 	tavor_qphdl_t		qphdl;
2358 	int			status;
2359 
2360 	TAVOR_TNF_ENTER(tavor_ci_post_recv);
2361 
2362 	ASSERT(wr_p != NULL);
2363 	ASSERT(num_wr != 0);
2364 
2365 	/* Check for valid HCA handle */
2366 	if (hca == NULL) {
2367 		TNF_PROBE_0(tavor_ci_post_recv_invhca_fail,
2368 		    TAVOR_TNF_ERROR, "");
2369 		TAVOR_TNF_EXIT(tavor_ci_post_recv);
2370 		return (IBT_HCA_HDL_INVALID);
2371 	}
2372 
2373 	/* Check for valid QP handle pointer */
2374 	if (qp == NULL) {
2375 		TNF_PROBE_0(tavor_ci_post_recv_invqphdl_fail,
2376 		    TAVOR_TNF_ERROR, "");
2377 		TAVOR_TNF_EXIT(tavor_ci_post_recv);
2378 		return (IBT_QP_HDL_INVALID);
2379 	}
2380 
2381 	/* Grab the Tavor softstate pointer and QP handle */
2382 	state = (tavor_state_t *)hca;
2383 	qphdl = (tavor_qphdl_t)qp;
2384 
2385 	/* Post the receive WQEs */
2386 	status = tavor_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
2387 	if (status != DDI_SUCCESS) {
2388 		TNF_PROBE_1(tavor_ci_post_recv_fail, TAVOR_TNF_ERROR, "",
2389 		    tnf_uint, status, status);
2390 		TAVOR_TNF_EXIT(tavor_ci_post_recv);
2391 		return (status);
2392 	}
2393 
2394 	TAVOR_TNF_EXIT(tavor_ci_post_recv);
2395 	return (IBT_SUCCESS);
2396 }
2397 
2398 
2399 /*
2400  * tavor_ci_poll_cq()
2401  *    Poll for a work request completion
2402  *    Context: Can be called from interrupt or base context.
2403  */
static ibt_status_t
tavor_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
    uint_t num_wc, uint_t *num_polled)
{
	tavor_state_t		*state;
	tavor_cqhdl_t		cqhdl;
	uint_t			polled;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_poll_cq);

	ASSERT(wc_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_poll_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		TNF_PROBE_0(tavor_ci_poll_cq_invcqhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
		return (IBT_CQ_HDL_INVALID);
	}

	/* Check for valid num_wc field (caller must request at least one) */
	if (num_wc == 0) {
		TNF_PROBE_0(tavor_ci_poll_cq_num_wc_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
		return (IBT_INVALID_PARAM);
	}

	/* Grab the Tavor softstate pointer and CQ handle */
	state = (tavor_state_t *)hca;
	cqhdl = (tavor_cqhdl_t)cq;

	/* Poll for work request completions */
	status = tavor_cq_poll(state, cqhdl, wc_p, num_wc, &polled);

	/*
	 * First fill in "num_polled" argument (only when valid).  This is
	 * done before the status check on purpose: num_polled is optional,
	 * and the count is reported regardless of the poll outcome.
	 */
	if (num_polled) {
		*num_polled = polled;
	}

	/*
	 * Check the status code;
	 *   If empty, we return empty (IBT_CQ_EMPTY is passed through to
	 *   the caller without firing the error probe).
	 *   If error, we print out an error and then return
	 *   If success (something was polled), we return success
	 */
	if (status != DDI_SUCCESS) {
		if (status != IBT_CQ_EMPTY) {
			TNF_PROBE_1(tavor_ci_poll_cq_fail, TAVOR_TNF_ERROR, "",
			    tnf_uint, status, status);
		}
		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_poll_cq);
	return (IBT_SUCCESS);
}
2471 
2472 
2473 /*
2474  * tavor_ci_notify_cq()
2475  *    Enable notification events on the specified CQ
2476  *    Context: Can be called from interrupt or base context.
2477  */
2478 static ibt_status_t
2479 tavor_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
2480     ibt_cq_notify_flags_t flags)
2481 {
2482 	tavor_state_t		*state;
2483 	tavor_cqhdl_t		cqhdl;
2484 	int			status;
2485 
2486 	TAVOR_TNF_ENTER(tavor_ci_notify_cq);
2487 
2488 	/* Check for valid HCA handle */
2489 	if (hca == NULL) {
2490 		TNF_PROBE_0(tavor_ci_notify_cq_invhca_fail,
2491 		    TAVOR_TNF_ERROR, "");
2492 		TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2493 		return (IBT_HCA_HDL_INVALID);
2494 	}
2495 
2496 	/* Check for valid CQ handle pointer */
2497 	if (cq_hdl == NULL) {
2498 		TNF_PROBE_0(tavor_ci_notify_cq_invcqhdl_fail,
2499 		    TAVOR_TNF_ERROR, "");
2500 		TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2501 		return (IBT_CQ_HDL_INVALID);
2502 	}
2503 
2504 	/* Grab the Tavor softstate pointer and CQ handle */
2505 	state = (tavor_state_t *)hca;
2506 	cqhdl = (tavor_cqhdl_t)cq_hdl;
2507 
2508 	/* Enable the CQ notification */
2509 	status = tavor_cq_notify(state, cqhdl, flags);
2510 	if (status != DDI_SUCCESS) {
2511 		TNF_PROBE_1(tavor_ci_notify_cq_fail, TAVOR_TNF_ERROR, "",
2512 		    tnf_uint, status, status);
2513 		TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2514 		return (status);
2515 	}
2516 
2517 	TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2518 	return (IBT_SUCCESS);
2519 }
2520 
2521 /*
2522  * tavor_ci_ci_data_in()
2523  *    Exchange CI-specific data.
2524  *    Context: Can be called only from user or kernel context.
2525  */
2526 static ibt_status_t
2527 tavor_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2528     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2529     size_t data_sz)
2530 {
2531 	tavor_state_t		*state;
2532 	int			status;
2533 
2534 	TAVOR_TNF_ENTER(tavor_ci_ci_data_in);
2535 
2536 	/* Check for valid HCA handle */
2537 	if (hca == NULL) {
2538 		TNF_PROBE_0(tavor_ci_ci_data_in_invhca_fail,
2539 		    TAVOR_TNF_ERROR, "");
2540 		TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2541 		return (IBT_HCA_HDL_INVALID);
2542 	}
2543 
2544 	/* Grab the Tavor softstate pointer */
2545 	state = (tavor_state_t *)hca;
2546 
2547 	/* Get the Tavor userland mapping information */
2548 	status = tavor_umap_ci_data_in(state, flags, object,
2549 	    ibc_object_handle, data_p, data_sz);
2550 	if (status != DDI_SUCCESS) {
2551 		TNF_PROBE_1(tavor_ci_ci_data_in_umap_fail, TAVOR_TNF_ERROR,
2552 		    "", tnf_uint, status, status);
2553 		TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2554 		return (status);
2555 	}
2556 
2557 	TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2558 	return (IBT_SUCCESS);
2559 }
2560 
2561 /*
2562  * tavor_ci_ci_data_out()
2563  *    Exchange CI-specific data.
2564  *    Context: Can be called only from user or kernel context.
2565  */
2566 static ibt_status_t
2567 tavor_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2568     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2569     size_t data_sz)
2570 {
2571 	tavor_state_t		*state;
2572 	int			status;
2573 
2574 	TAVOR_TNF_ENTER(tavor_ci_ci_data_out);
2575 
2576 	/* Check for valid HCA handle */
2577 	if (hca == NULL) {
2578 		TNF_PROBE_0(tavor_ci_ci_data_out_invhca_fail,
2579 		    TAVOR_TNF_ERROR, "");
2580 		TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2581 		return (IBT_HCA_HDL_INVALID);
2582 	}
2583 
2584 	/* Grab the Tavor softstate pointer */
2585 	state = (tavor_state_t *)hca;
2586 
2587 	/* Get the Tavor userland mapping information */
2588 	status = tavor_umap_ci_data_out(state, flags, object,
2589 	    ibc_object_handle, data_p, data_sz);
2590 	if (status != DDI_SUCCESS) {
2591 		TNF_PROBE_1(tavor_ci_ci_data_out_umap_fail, TAVOR_TNF_ERROR,
2592 		    "", tnf_uint, status, status);
2593 		TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2594 		return (status);
2595 	}
2596 
2597 	TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2598 	return (IBT_SUCCESS);
2599 }
2600 
2601 
2602 /*
2603  * tavor_ci_alloc_srq()
2604  *    Allocate a Shared Receive Queue (SRQ)
2605  *    Context: Can be called only from user or kernel context
2606  */
2607 static ibt_status_t
2608 tavor_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
2609     ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
2610     ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
2611 {
2612 	tavor_state_t		*state;
2613 	tavor_pdhdl_t		pdhdl;
2614 	tavor_srqhdl_t		srqhdl;
2615 	tavor_srq_info_t	srqinfo;
2616 	tavor_srq_options_t	op;
2617 	int			status;
2618 
2619 	TAVOR_TNF_ENTER(tavor_ci_alloc_srq);
2620 
2621 	/* Check for valid HCA handle */
2622 	if (hca == NULL) {
2623 		TNF_PROBE_0(tavor_ci_alloc_srq_invhca_fail,
2624 		    TAVOR_TNF_ERROR, "");
2625 		TAVOR_TNF_EXIT(tavor_alloc_srq);
2626 		return (IBT_HCA_HDL_INVALID);
2627 	}
2628 
2629 	state = (tavor_state_t *)hca;
2630 
2631 	/* Check if SRQ is even supported */
2632 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2633 		TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail,
2634 		    TAVOR_TNF_ERROR, "");
2635 		TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2636 		return (IBT_NOT_SUPPORTED);
2637 	}
2638 
2639 	/* Check for valid PD handle pointer */
2640 	if (pd == NULL) {
2641 		TNF_PROBE_0(tavor_ci_alloc_srq_invpdhdl_fail,
2642 		    TAVOR_TNF_ERROR, "");
2643 		TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2644 		return (IBT_PD_HDL_INVALID);
2645 	}
2646 
2647 	pdhdl = (tavor_pdhdl_t)pd;
2648 
2649 	srqinfo.srqi_ibt_srqhdl = ibt_srq;
2650 	srqinfo.srqi_pd		= pdhdl;
2651 	srqinfo.srqi_sizes	= sizes;
2652 	srqinfo.srqi_real_sizes	= ret_sizes_p;
2653 	srqinfo.srqi_srqhdl	= &srqhdl;
2654 	srqinfo.srqi_flags	= flags;
2655 	op.srqo_wq_loc		= state->ts_cfg_profile->cp_srq_wq_inddr;
2656 	status = tavor_srq_alloc(state, &srqinfo, TAVOR_NOSLEEP, &op);
2657 	if (status != DDI_SUCCESS) {
2658 		TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2659 		return (status);
2660 	}
2661 
2662 	*ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
2663 
2664 	TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2665 	return (IBT_SUCCESS);
2666 }
2667 
2668 /*
2669  * tavor_ci_free_srq()
2670  *    Free a Shared Receive Queue (SRQ)
2671  *    Context: Can be called only from user or kernel context
2672  */
2673 static ibt_status_t
2674 tavor_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
2675 {
2676 	tavor_state_t	*state;
2677 	tavor_srqhdl_t	srqhdl;
2678 	int		status;
2679 
2680 	TAVOR_TNF_ENTER(tavor_ci_free_srq);
2681 
2682 	/* Check for valid HCA handle */
2683 	if (hca == NULL) {
2684 		TNF_PROBE_0(tavor_ci_free_srq_invhca_fail,
2685 		    TAVOR_TNF_ERROR, "");
2686 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2687 		return (IBT_HCA_HDL_INVALID);
2688 	}
2689 
2690 	state = (tavor_state_t *)hca;
2691 
2692 	/* Check if SRQ is even supported */
2693 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2694 		TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail,
2695 		    TAVOR_TNF_ERROR, "");
2696 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2697 		return (IBT_NOT_SUPPORTED);
2698 	}
2699 
2700 	/* Check for valid SRQ handle pointer */
2701 	if (srq == NULL) {
2702 		TNF_PROBE_0(tavor_ci_free_srq_invsrqhdl_fail,
2703 		    TAVOR_TNF_ERROR, "");
2704 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2705 		return (IBT_SRQ_HDL_INVALID);
2706 	}
2707 
2708 	srqhdl = (tavor_srqhdl_t)srq;
2709 
2710 	/* Free the SRQ */
2711 	status = tavor_srq_free(state, &srqhdl, TAVOR_NOSLEEP);
2712 	if (status != DDI_SUCCESS) {
2713 		TNF_PROBE_1(tavor_ci_free_srq_fail, TAVOR_TNF_ERROR, "",
2714 		    tnf_uint, status, status);
2715 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2716 		return (status);
2717 	}
2718 
2719 	TAVOR_TNF_EXIT(tavor_ci_free_srq);
2720 	return (IBT_SUCCESS);
2721 }
2722 
2723 /*
2724  * tavor_ci_query_srq()
2725  *    Query properties of a Shared Receive Queue (SRQ)
2726  *    Context: Can be called from interrupt or base context.
2727  */
2728 static ibt_status_t
2729 tavor_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
2730     ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
2731 {
2732 	tavor_state_t	*state;
2733 	tavor_srqhdl_t	srqhdl;
2734 
2735 	TAVOR_TNF_ENTER(tavor_ci_query_srq);
2736 
2737 	/* Check for valid HCA handle */
2738 	if (hca == NULL) {
2739 		TNF_PROBE_0(tavor_ci_query_srq_invhca_fail,
2740 		    TAVOR_TNF_ERROR, "");
2741 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2742 		return (IBT_HCA_HDL_INVALID);
2743 	}
2744 
2745 	state = (tavor_state_t *)hca;
2746 
2747 	/* Check if SRQ is even supported */
2748 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2749 		TNF_PROBE_0(tavor_ci_query_srq_not_supported_fail,
2750 		    TAVOR_TNF_ERROR, "");
2751 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2752 		return (IBT_NOT_SUPPORTED);
2753 	}
2754 
2755 	/* Check for valid SRQ handle pointer */
2756 	if (srq == NULL) {
2757 		TNF_PROBE_0(tavor_ci_query_srq_invsrqhdl_fail,
2758 		    TAVOR_TNF_ERROR, "");
2759 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2760 		return (IBT_SRQ_HDL_INVALID);
2761 	}
2762 
2763 	srqhdl = (tavor_srqhdl_t)srq;
2764 
2765 	mutex_enter(&srqhdl->srq_lock);
2766 	if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
2767 		mutex_exit(&srqhdl->srq_lock);
2768 		TNF_PROBE_0(tavor_ci_query_srq_error_state,
2769 		    TAVOR_TNF_ERROR, "");
2770 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2771 		return (IBT_SRQ_ERROR_STATE);
2772 	}
2773 
2774 	*pd_p   = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
2775 	sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz;
2776 	sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
2777 	mutex_exit(&srqhdl->srq_lock);
2778 	*limit_p  = 0;
2779 
2780 	TAVOR_TNF_EXIT(tavor_ci_query_srq);
2781 	return (IBT_SUCCESS);
2782 }
2783 
2784 /*
2785  * tavor_ci_modify_srq()
2786  *    Modify properties of a Shared Receive Queue (SRQ)
2787  *    Context: Can be called from interrupt or base context.
2788  */
2789 /* ARGSUSED */
2790 static ibt_status_t
2791 tavor_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2792     ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
2793 {
2794 	tavor_state_t	*state;
2795 	tavor_srqhdl_t	srqhdl;
2796 	uint_t		resize_supported, cur_srq_size;
2797 	int		status;
2798 
2799 	TAVOR_TNF_ENTER(tavor_ci_modify_srq);
2800 
2801 	/* Check for valid HCA handle */
2802 	if (hca == NULL) {
2803 		TNF_PROBE_0(tavor_ci_modify_srq_invhca_fail,
2804 		    TAVOR_TNF_ERROR, "");
2805 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2806 		return (IBT_HCA_HDL_INVALID);
2807 	}
2808 
2809 	state = (tavor_state_t *)hca;
2810 
2811 	/* Check if SRQ is even supported */
2812 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2813 		TNF_PROBE_0(tavor_ci_modify_srq_not_supported_fail,
2814 		    TAVOR_TNF_ERROR, "");
2815 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2816 		return (IBT_NOT_SUPPORTED);
2817 	}
2818 
2819 	/* Check for valid SRQ handle pointer */
2820 	if (srq == NULL) {
2821 		TNF_PROBE_0(tavor_ci_modify_srq_invcqhdl_fail,
2822 		    TAVOR_TNF_ERROR, "");
2823 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2824 		return (IBT_SRQ_HDL_INVALID);
2825 	}
2826 
2827 	srqhdl = (tavor_srqhdl_t)srq;
2828 
2829 	/*
2830 	 * Check Error State of SRQ.
2831 	 * Also, while we are holding the lock we save away the current SRQ
2832 	 * size for later use.
2833 	 */
2834 	mutex_enter(&srqhdl->srq_lock);
2835 	cur_srq_size = srqhdl->srq_wq_bufsz;
2836 	if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
2837 		mutex_exit(&srqhdl->srq_lock);
2838 		TNF_PROBE_0(tavor_ci_modify_srq_error_state,
2839 		    TAVOR_TNF_ERROR, "");
2840 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2841 		return (IBT_SRQ_ERROR_STATE);
2842 	}
2843 	mutex_exit(&srqhdl->srq_lock);
2844 
2845 	/*
2846 	 * Setting the limit watermark is not currently supported.  This is a
2847 	 * tavor hardware (firmware) limitation.  We return NOT_SUPPORTED here,
2848 	 * and have the limit code commented out for now.
2849 	 *
2850 	 * XXX If we enable the limit watermark support, we need to do checks
2851 	 * and set the 'srq->srq_wr_limit' here, instead of returning not
2852 	 * supported.  The 'tavor_srq_modify' operation below is for resizing
2853 	 * the SRQ only, the limit work should be done here.  If this is
2854 	 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
2855 	 * function should also be removed at that time.
2856 	 */
2857 	if (flags & IBT_SRQ_SET_LIMIT) {
2858 		TNF_PROBE_0(tavor_ci_modify_srq_limit_not_supported,
2859 		    TAVOR_TNF_ERROR, "");
2860 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2861 		return (IBT_NOT_SUPPORTED);
2862 	}
2863 
2864 	/*
2865 	 * Check the SET_SIZE flag.  If not set, we simply return success here.
2866 	 * However if it is set, we check if resize is supported and only then
2867 	 * do we continue on with our resize processing.
2868 	 */
2869 	if (!(flags & IBT_SRQ_SET_SIZE)) {
2870 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2871 		return (IBT_SUCCESS);
2872 	}
2873 
2874 	resize_supported = state->ts_ibtfinfo.hca_attr->hca_flags &
2875 	    IBT_HCA_RESIZE_SRQ;
2876 
2877 	if ((flags & IBT_SRQ_SET_SIZE) && !resize_supported) {
2878 		TNF_PROBE_0(tavor_ci_modify_srq_resize_not_supp_fail,
2879 		    TAVOR_TNF_ERROR, "");
2880 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2881 		return (IBT_NOT_SUPPORTED);
2882 	}
2883 
2884 	/*
2885 	 * We do not support resizing an SRQ to be smaller than it's current
2886 	 * size.  If a smaller (or equal) size is requested, then we simply
2887 	 * return success, and do nothing.
2888 	 */
2889 	if (size <= cur_srq_size) {
2890 		*ret_size_p = cur_srq_size;
2891 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2892 		return (IBT_SUCCESS);
2893 	}
2894 
2895 	status = tavor_srq_modify(state, srqhdl, size, ret_size_p,
2896 	    TAVOR_NOSLEEP);
2897 	if (status != DDI_SUCCESS) {
2898 		/* Set return value to current SRQ size */
2899 		*ret_size_p = cur_srq_size;
2900 		TNF_PROBE_1(tavor_ci_modify_srq_fail, TAVOR_TNF_ERROR, "",
2901 		    tnf_uint, status, status);
2902 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2903 		return (status);
2904 	}
2905 
2906 	TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2907 	return (IBT_SUCCESS);
2908 }
2909 
2910 /*
2911  * tavor_ci_post_srq()
2912  *    Post a Work Request to the specified Shared Receive Queue (SRQ)
2913  *    Context: Can be called from interrupt or base context.
2914  */
2915 static ibt_status_t
2916 tavor_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2917     ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2918 {
2919 	tavor_state_t	*state;
2920 	tavor_srqhdl_t	srqhdl;
2921 	int		status;
2922 
2923 	TAVOR_TNF_ENTER(tavor_ci_post_srq);
2924 
2925 	/* Check for valid HCA handle */
2926 	if (hca == NULL) {
2927 		TNF_PROBE_0(tavor_ci_post_srq_invhca_fail,
2928 		    TAVOR_TNF_ERROR, "");
2929 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
2930 		return (IBT_HCA_HDL_INVALID);
2931 	}
2932 
2933 	state = (tavor_state_t *)hca;
2934 
2935 	/* Check if SRQ is even supported */
2936 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2937 		TNF_PROBE_0(tavor_ci_post_srq_not_supported_fail,
2938 		    TAVOR_TNF_ERROR, "");
2939 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
2940 		return (IBT_NOT_SUPPORTED);
2941 	}
2942 
2943 	/* Check for valid SRQ handle pointer */
2944 	if (srq == NULL) {
2945 		TNF_PROBE_0(tavor_ci_post_srq_invsrqhdl_fail,
2946 		    TAVOR_TNF_ERROR, "");
2947 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
2948 		return (IBT_SRQ_HDL_INVALID);
2949 	}
2950 
2951 	srqhdl = (tavor_srqhdl_t)srq;
2952 
2953 	status = tavor_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2954 	if (status != DDI_SUCCESS) {
2955 		TNF_PROBE_1(tavor_ci_post_srq_fail, TAVOR_TNF_ERROR, "",
2956 		    tnf_uint, status, status);
2957 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
2958 		return (status);
2959 	}
2960 
2961 	TAVOR_TNF_EXIT(tavor_ci_post_srq);
2962 	return (IBT_SUCCESS);
2963 }
2964 
2965 /* Address translation */
2966 /*
2967  * tavor_ci_map_mem_area()
2968  *    Context: Can be called from interrupt or base context.
2969  */
/* ARGSUSED */
static ibt_status_t
tavor_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
    void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
    ibc_ma_hdl_t *ibc_ma_hdl_p)
{
	/* Memory area mapping is not implemented for this HCA */
	return (IBT_NOT_SUPPORTED);
}
2978 
2979 /*
2980  * tavor_ci_unmap_mem_area()
2981  * Unmap the memory area
2982  *    Context: Can be called from interrupt or base context.
2983  */
/* ARGSUSED */
static ibt_status_t
tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
{
	/* Memory area unmapping is not implemented for this HCA */
	return (IBT_NOT_SUPPORTED);
}
2990 
/*
 * tavor_ci_map_mem_iov()
 *    Not implemented; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov,
    ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
{
	return (IBT_NOT_SUPPORTED);
}
2998 
/*
 * tavor_ci_unmap_mem_iov()
 *    Not implemented; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
{
	return (IBT_NOT_SUPPORTED);
}
3005 
3006 /* Allocate L_Key */
3007 /*
3008  * tavor_ci_alloc_lkey()
3009  */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_lkey_flags_t flags, uint_t phys_buf_list_sz, ibc_mr_hdl_t *mr_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	/* L_Key allocation is not implemented; trace and reject */
	TAVOR_TNF_ENTER(tavor_ci_alloc_lkey);
	TAVOR_TNF_EXIT(tavor_ci_alloc_lkey);
	return (IBT_NOT_SUPPORTED);
}
3020 
3021 /* Physical Register Memory Region */
3022 /*
3023  * tavor_ci_register_physical_mr()
3024  */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	/* Physical MR registration is not implemented; trace and reject */
	TAVOR_TNF_ENTER(tavor_ci_register_physical_mr);
	TAVOR_TNF_EXIT(tavor_ci_register_physical_mr);
	return (IBT_NOT_SUPPORTED);
}
3035 
3036 /*
3037  * tavor_ci_reregister_physical_mr()
3038  */
3039 /* ARGSUSED */
3040 static ibt_status_t
3041 tavor_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
3042     ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
3043     ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
3044 {
3045 	TAVOR_TNF_ENTER(tavor_ci_reregister_physical_mr);
3046 	TAVOR_TNF_EXIT(tavor_ci_reregister_physical_mr);
3047 	return (IBT_NOT_SUPPORTED);
3048 }
3049 
3050 /* Mellanox FMR Support */
3051 /*
3052  * tavor_ci_create_fmr_pool()
3053  * Creates a pool of memory regions suitable for FMR registration
3054  *    Context: Can be called from base context only
3055  */
3056 /* ARGSUSED */
3057 static ibt_status_t
3058 tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
3059     ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
3060 {
3061 	return (IBT_NOT_SUPPORTED);
3062 }
3063 
3064 /*
3065  * tavor_ci_destroy_fmr_pool()
3066  * Free all resources associated with an FMR pool.
3067  *    Context: Can be called from base context only.
3068  */
3069 /* ARGSUSED */
3070 static ibt_status_t
3071 tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
3072 {
3073 	return (IBT_NOT_SUPPORTED);
3074 }
3075 
3076 /*
3077  * tavor_ci_flush_fmr_pool()
3078  * Force a flush of the memory tables, cleaning up used FMR resources.
3079  *    Context: Can be called from interrupt or base context.
3080  */
3081 /* ARGSUSED */
3082 static ibt_status_t
3083 tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
3084 {
3085 	return (IBT_NOT_SUPPORTED);
3086 }
3087 
3088 /*
3089  * tavor_ci_register_physical_fmr()
3090  * From the 'pool' of FMR regions passed in, performs register physical
3091  * operation.
3092  *    Context: Can be called from interrupt or base context.
3093  */
3094 /* ARGSUSED */
3095 static ibt_status_t
3096 tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
3097     ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
3098     void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
3099 {
3100 	return (IBT_NOT_SUPPORTED);
3101 }
3102 
3103 /*
3104  * tavor_ci_deregister_fmr()
3105  * Moves an FMR (specified by 'mr') to the deregistered state.
3106  *    Context: Can be called from base context only.
3107  */
3108 /* ARGSUSED */
3109 static ibt_status_t
3110 tavor_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
3111 {
3112 	return (IBT_NOT_SUPPORTED);
3113 }
3114 
3115 /*
3116  * tavor_ci_alloc_io_mem()
3117  *     Allocate dmable memory
3118  *
3119  */
3120 ibt_status_t
3121 tavor_ci_alloc_io_mem(
3122 	ibc_hca_hdl_t hca,
3123 	size_t size,
3124 	ibt_mr_flags_t mr_flag,
3125 	caddr_t *kaddrp,
3126 	ibc_mem_alloc_hdl_t *mem_alloc_hdl)
3127 {
3128 	tavor_state_t	*state;
3129 	int		status;
3130 
3131 	TAVOR_TNF_ENTER(tavor_ci_alloc_io_mem);
3132 
3133 	/* Check for valid HCA handle */
3134 	if (hca == NULL) {
3135 		TNF_PROBE_0(tavor_ci_alloc_io_mem_invhca_fail,
3136 		    TAVOR_TNF_ERROR, "");
3137 		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3138 		return (IBT_HCA_HDL_INVALID);
3139 	}
3140 
3141 	/* Check for valid mem_alloc_hdl handle pointer */
3142 	if (mem_alloc_hdl == NULL) {
3143 		TNF_PROBE_0(tavor_ci_alloc_io_mem_hdl_fail,
3144 		    TAVOR_TNF_ERROR, "");
3145 		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3146 		return (IBT_MEM_ALLOC_HDL_INVALID);
3147 	}
3148 
3149 	/* Grab the Tavor softstate pointer and mem handle */
3150 	state = (tavor_state_t *)hca;
3151 
3152 	/* Allocate the AH */
3153 	status = tavor_mem_alloc(state, size, mr_flag, kaddrp,
3154 	    (tavor_mem_alloc_hdl_t *)mem_alloc_hdl);
3155 
3156 	if (status != DDI_SUCCESS) {
3157 		TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "",
3158 		    tnf_uint, status, status);
3159 		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3160 		return (status);
3161 	}
3162 
3163 	TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3164 	return (IBT_SUCCESS);
3165 }
3166 
3167 
3168 /*
3169  * tavor_ci_free_io_mem()
3170  * free the memory
3171  */
3172 ibt_status_t
3173 tavor_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
3174 {
3175 	tavor_mem_alloc_hdl_t	memhdl;
3176 
3177 	TAVOR_TNF_ENTER(tavor_ci_free_io_mem);
3178 
3179 	/* Check for valid HCA handle */
3180 	if (hca == NULL) {
3181 		TNF_PROBE_0(tavor_ci_free_io_mem_invhca_fail,
3182 		    TAVOR_TNF_ERROR, "");
3183 		TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3184 		return (IBT_HCA_HDL_INVALID);
3185 	}
3186 
3187 	/* Check for valid mem_alloc_hdl handle pointer */
3188 	if (mem_alloc_hdl == NULL) {
3189 		TNF_PROBE_0(tavor_ci_free_io_mem_hdl_fail,
3190 		    TAVOR_TNF_ERROR, "");
3191 		TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3192 		return (IBT_MEM_ALLOC_HDL_INVALID);
3193 	}
3194 
3195 	memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl;
3196 
3197 	/* free the memory */
3198 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*memhdl))
3199 	ddi_dma_mem_free(&memhdl->tavor_acc_hdl);
3200 	ddi_dma_free_handle(&memhdl->tavor_dma_hdl);
3201 
3202 	kmem_free(memhdl, sizeof (*memhdl));
3203 	TAVOR_TNF_EXIT(tavor_dma_free);
3204 	return (IBT_SUCCESS);
3205 }
3206 
3207 
3208 int
3209 tavor_mem_alloc(
3210 	tavor_state_t *state,
3211 	size_t size,
3212 	ibt_mr_flags_t flags,
3213 	caddr_t *kaddrp,
3214 	tavor_mem_alloc_hdl_t *mem_hdl)
3215 {
3216 	ddi_dma_handle_t	dma_hdl;
3217 	ddi_dma_attr_t		dma_attr;
3218 	ddi_acc_handle_t	acc_hdl;
3219 	size_t			real_len;
3220 	int			status;
3221 	int 			(*ddi_cb)(caddr_t);
3222 
3223 	TAVOR_TNF_ENTER(tavor_mem_alloc);
3224 
3225 	tavor_dma_attr_init(&dma_attr);
3226 
3227 	ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3228 
3229 	/* Allocate a DMA handle */
3230 	status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, ddi_cb,
3231 	    NULL, &dma_hdl);
3232 	if (status != DDI_SUCCESS) {
3233 		TNF_PROBE_0(tavor_dma_alloc_handle_fail, TAVOR_TNF_ERROR, "");
3234 		TAVOR_TNF_EXIT(tavor_mem_alloc);
3235 		return (DDI_FAILURE);
3236 	}
3237 
3238 	/* Allocate DMA memory */
3239 	status = ddi_dma_mem_alloc(dma_hdl, size,
3240 	    &state->ts_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
3241 	    NULL,
3242 	    kaddrp, &real_len, &acc_hdl);
3243 	if (status != DDI_SUCCESS) {
3244 		ddi_dma_free_handle(&dma_hdl);
3245 		TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
3246 		TAVOR_TNF_EXIT(tavor_mem_alloc);
3247 		return (DDI_FAILURE);
3248 	}
3249 
3250 	/* Package the tavor_dma_info contents and return */
3251 	*mem_hdl = kmem_alloc(sizeof (**mem_hdl),
3252 	    flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP);
3253 	if (*mem_hdl == NULL) {
3254 		ddi_dma_mem_free(&acc_hdl);
3255 		ddi_dma_free_handle(&dma_hdl);
3256 		TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
3257 		TAVOR_TNF_EXIT(tavor_mem_alloc);
3258 		return (DDI_FAILURE);
3259 	}
3260 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(**mem_hdl))
3261 	(*mem_hdl)->tavor_dma_hdl = dma_hdl;
3262 	(*mem_hdl)->tavor_acc_hdl = acc_hdl;
3263 
3264 	TAVOR_TNF_EXIT(tavor_mem_alloc);
3265 	return (DDI_SUCCESS);
3266 }
3267