1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * hermon_ci.c
28  *    Hermon Channel Interface (CI) Routines
29  *
30  *    Implements all the routines necessary to interface with the IBTF.
31  *    Pointers to all of these functions are passed to the IBTF at attach()
32  *    time in the ibc_operations_t structure.  These functions include all
33  *    of the necessary routines to implement the required InfiniBand "verbs"
34  *    and additional IBTF-specific interfaces.
35  */
36 
37 #include <sys/types.h>
38 #include <sys/conf.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 
42 #include <sys/ib/adapters/hermon/hermon.h>
43 
44 extern uint32_t hermon_kernel_data_ro;
45 extern uint32_t hermon_user_data_ro;
46 
47 /* HCA and port related operations */
48 static ibt_status_t hermon_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
49     ibt_hca_portinfo_t *);
50 static ibt_status_t hermon_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
51     ibt_port_modify_flags_t, uint8_t);
52 static ibt_status_t hermon_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);
53 
54 /* Protection Domains */
55 static ibt_status_t hermon_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
56     ibc_pd_hdl_t *);
57 static ibt_status_t hermon_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);
58 
59 /* Reliable Datagram Domains */
60 static ibt_status_t hermon_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
61     ibc_rdd_hdl_t *);
62 static ibt_status_t hermon_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);
63 
64 /* Address Handles */
65 static ibt_status_t hermon_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
66     ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
67 static ibt_status_t hermon_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
68 static ibt_status_t hermon_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
69     ibc_pd_hdl_t *, ibt_adds_vect_t *);
70 static ibt_status_t hermon_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
71     ibt_adds_vect_t *);
72 
73 /* Queue Pairs */
74 static ibt_status_t hermon_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
75     ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
76     ibc_qp_hdl_t *);
77 static ibt_status_t hermon_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
78     ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
79     ibt_chan_sizes_t *, ibc_qp_hdl_t *);
80 static ibt_status_t hermon_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
81     ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
82     ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
83 static ibt_status_t hermon_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
84     ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
85 static ibt_status_t hermon_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
86 static ibt_status_t hermon_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
87     ibt_qp_query_attr_t *);
88 static ibt_status_t hermon_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
89     ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);
90 
91 /* Completion Queues */
92 static ibt_status_t hermon_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
93     ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
94 static ibt_status_t hermon_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
95 static ibt_status_t hermon_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
96     uint_t *, uint_t *, uint_t *, ibt_cq_handler_id_t *);
97 static ibt_status_t hermon_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
98     uint_t, uint_t *);
99 static ibt_status_t hermon_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
100     uint_t, uint_t, ibt_cq_handler_id_t);
101 static ibt_status_t hermon_ci_alloc_cq_sched(ibc_hca_hdl_t,
102     ibt_cq_sched_flags_t, ibc_cq_handler_attr_t *);
103 static ibt_status_t hermon_ci_free_cq_sched(ibc_hca_hdl_t, ibt_cq_handler_id_t);
104 
105 /* EE Contexts */
106 static ibt_status_t hermon_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
107     ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
108 static ibt_status_t hermon_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
109 static ibt_status_t hermon_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
110     ibt_eec_query_attr_t *);
111 static ibt_status_t hermon_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
112     ibt_cep_modify_flags_t, ibt_eec_info_t *);
113 
114 /* Memory Registration */
115 static ibt_status_t hermon_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
116     ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
117 static ibt_status_t hermon_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
118     ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
119 static ibt_status_t hermon_ci_register_shared_mr(ibc_hca_hdl_t,
120     ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
121     ibc_mr_hdl_t *, ibt_mr_desc_t *);
122 static ibt_status_t hermon_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
123 static ibt_status_t hermon_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
124     ibt_mr_query_attr_t *);
125 static ibt_status_t hermon_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
126     ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
127     ibt_mr_desc_t *);
128 static ibt_status_t hermon_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
129     ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
130     ibt_mr_desc_t *);
131 static ibt_status_t hermon_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
132 
133 /* Memory Windows */
134 static ibt_status_t hermon_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
135     ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
136 static ibt_status_t hermon_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
137 static ibt_status_t hermon_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
138     ibt_mw_query_attr_t *);
139 
140 /* Multicast Groups */
141 static ibt_status_t hermon_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
142     ib_gid_t, ib_lid_t);
143 static ibt_status_t hermon_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
144     ib_gid_t, ib_lid_t);
145 
146 /* Work Request and Completion Processing */
147 static ibt_status_t hermon_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
148     ibt_send_wr_t *, uint_t, uint_t *);
149 static ibt_status_t hermon_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
150     ibt_recv_wr_t *, uint_t, uint_t *);
151 static ibt_status_t hermon_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
152     ibt_wc_t *, uint_t, uint_t *);
153 static ibt_status_t hermon_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
154     ibt_cq_notify_flags_t);
155 
156 /* CI Object Private Data */
157 static ibt_status_t hermon_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
158     ibt_object_type_t, void *, void *, size_t);
159 
160 /* CI Object Private Data */
161 static ibt_status_t hermon_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
162     ibt_object_type_t, void *, void *, size_t);
163 
164 /* Shared Receive Queues */
165 static ibt_status_t hermon_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
166     ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
167     ibt_srq_sizes_t *);
168 static ibt_status_t hermon_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
169 static ibt_status_t hermon_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
170     ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
171 static ibt_status_t hermon_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
172     ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
173 static ibt_status_t hermon_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
174     ibt_recv_wr_t *, uint_t, uint_t *);
175 
176 /* Address translation */
177 static ibt_status_t hermon_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
178     void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
179 static ibt_status_t hermon_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
180 static ibt_status_t hermon_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
181     ibt_all_wr_t *, ibc_mi_hdl_t *);
182 static ibt_status_t hermon_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);
183 
184 /* Allocate L_Key */
185 static ibt_status_t hermon_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
186     ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
187 
188 /* Physical Register Memory Region */
189 static ibt_status_t hermon_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
190     ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
191 static ibt_status_t hermon_ci_reregister_physical_mr(ibc_hca_hdl_t,
192     ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
193     ibt_pmr_desc_t *);
194 
195 /* Mellanox FMR */
196 static ibt_status_t hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca,
197     ibc_pd_hdl_t pd, ibt_fmr_pool_attr_t *fmr_params,
198     ibc_fmr_pool_hdl_t *fmr_pool);
199 static ibt_status_t hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
200     ibc_fmr_pool_hdl_t fmr_pool);
201 static ibt_status_t hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
202     ibc_fmr_pool_hdl_t fmr_pool);
203 static ibt_status_t hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
204     ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
205     void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
206 static ibt_status_t hermon_ci_deregister_fmr(ibc_hca_hdl_t hca,
207     ibc_mr_hdl_t mr);
208 
209 /* Memory Allocation/Deallocation */
210 static ibt_status_t hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size,
211     ibt_mr_flags_t mr_flag, caddr_t *kaddrp,
212     ibc_mem_alloc_hdl_t *mem_alloc_hdl_p);
213 static ibt_status_t hermon_ci_free_io_mem(ibc_hca_hdl_t hca,
214     ibc_mem_alloc_hdl_t mem_alloc_hdl);
215 
216 /*
217  * This ibc_operations_t structure includes pointers to all the entry points
218  * provided by the Hermon driver.  This structure is passed to the IBTF at
219  * driver attach time, using the ibc_attach() call.
220  */
221 ibc_operations_t hermon_ibc_ops = {
222 	/* HCA and port related operations */
223 	hermon_ci_query_hca_ports,
224 	hermon_ci_modify_ports,
225 	hermon_ci_modify_system_image,
226 
227 	/* Protection Domains */
228 	hermon_ci_alloc_pd,
229 	hermon_ci_free_pd,
230 
231 	/* Reliable Datagram Domains */
232 	hermon_ci_alloc_rdd,
233 	hermon_ci_free_rdd,
234 
235 	/* Address Handles */
236 	hermon_ci_alloc_ah,
237 	hermon_ci_free_ah,
238 	hermon_ci_query_ah,
239 	hermon_ci_modify_ah,
240 
241 	/* Queue Pairs */
242 	hermon_ci_alloc_qp,
243 	hermon_ci_alloc_special_qp,
244 	hermon_ci_alloc_qp_range,
245 	hermon_ci_free_qp,
246 	hermon_ci_release_qpn,
247 	hermon_ci_query_qp,
248 	hermon_ci_modify_qp,
249 
250 	/* Completion Queues */
251 	hermon_ci_alloc_cq,
252 	hermon_ci_free_cq,
253 	hermon_ci_query_cq,
254 	hermon_ci_resize_cq,
255 	hermon_ci_modify_cq,
256 	hermon_ci_alloc_cq_sched,
257 	hermon_ci_free_cq_sched,
258 
259 	/* EE Contexts */
260 	hermon_ci_alloc_eec,
261 	hermon_ci_free_eec,
262 	hermon_ci_query_eec,
263 	hermon_ci_modify_eec,
264 
265 	/* Memory Registration */
266 	hermon_ci_register_mr,
267 	hermon_ci_register_buf,
268 	hermon_ci_register_shared_mr,
269 	hermon_ci_deregister_mr,
270 	hermon_ci_query_mr,
271 	hermon_ci_reregister_mr,
272 	hermon_ci_reregister_buf,
273 	hermon_ci_sync_mr,
274 
275 	/* Memory Windows */
276 	hermon_ci_alloc_mw,
277 	hermon_ci_free_mw,
278 	hermon_ci_query_mw,
279 
280 	/* Multicast Groups */
281 	hermon_ci_attach_mcg,
282 	hermon_ci_detach_mcg,
283 
284 	/* Work Request and Completion Processing */
285 	hermon_ci_post_send,
286 	hermon_ci_post_recv,
287 	hermon_ci_poll_cq,
288 	hermon_ci_notify_cq,
289 
290 	/* CI Object Mapping Data */
291 	hermon_ci_ci_data_in,
292 	hermon_ci_ci_data_out,
293 
294 	/* Shared Receive Queue */
295 	hermon_ci_alloc_srq,
296 	hermon_ci_free_srq,
297 	hermon_ci_query_srq,
298 	hermon_ci_modify_srq,
299 	hermon_ci_post_srq,
300 
301 	/* Address translation */
302 	hermon_ci_map_mem_area,
303 	hermon_ci_unmap_mem_area,
304 	hermon_ci_map_mem_iov,
305 	hermon_ci_unmap_mem_iov,
306 
307 	/* Allocate L_key */
308 	hermon_ci_alloc_lkey,
309 
310 	/* Physical Register Memory Region */
311 	hermon_ci_register_physical_mr,
312 	hermon_ci_reregister_physical_mr,
313 
314 	/* Mellanox FMR */
315 	hermon_ci_create_fmr_pool,
316 	hermon_ci_destroy_fmr_pool,
317 	hermon_ci_flush_fmr_pool,
318 	hermon_ci_register_physical_fmr,
319 	hermon_ci_deregister_fmr,
320 
321 	/* Memory allocation */
322 	hermon_ci_alloc_io_mem,
323 	hermon_ci_free_io_mem,
324 };
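/*
 * For reference, a sketch (illustrative only) of how this table reaches the
 * IBTF at attach() time; the authoritative sequence and the exact
 * ibc_hca_info_t field names live in hermon.c and <sys/ib/ibtl/impl/ibc.h>:
 *
 *	ibc_hca_info_t	hca_info;
 *
 *	hca_info.hca_ci_vers = IBCI_V4;
 *	hca_info.hca_handle  = (ibc_hca_hdl_t)state;
 *	hca_info.hca_ops     = &hermon_ibc_ops;
 *	hca_info.hca_attr    = hca_attr;
 *	status = ibc_attach(&state->hs_ibtfpriv, &hca_info);
 */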
325 
326 
327 /*
328  * hermon_ci_query_hca_ports()
329  *    Returns HCA port attributes for either one or all of the HCA's ports.
330  *    Context: Can be called only from user or kernel context.
331  */
332 static ibt_status_t
333 hermon_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
334     ibt_hca_portinfo_t *info_p)
335 {
336 	hermon_state_t	*state;
337 	uint_t		start, end, port;
338 	int		status, indx;
339 
340 	/* Check for valid HCA handle */
341 	if (hca == NULL) {
342 		return (IBT_HCA_HDL_INVALID);
343 	}
344 
345 	/* Grab the Hermon softstate pointer */
346 	state = (hermon_state_t *)hca;
347 
348 	/*
349 	 * If the specified port is zero, then we are supposed to query all
350 	 * ports.  Otherwise, we query only the port number specified.
351 	 * Setup the start and end port numbers as appropriate for the loop
352 	 * below.  Note:  The first Hermon port is port number one (1).
353 	 */
354 	if (query_port == 0) {
355 		start = 1;
356 		end = start + (state->hs_cfg_profile->cp_num_ports - 1);
357 	} else {
358 		end = start = query_port;
359 	}
360 
361 	/* Query the port(s) */
362 	for (port = start, indx = 0; port <= end; port++, indx++) {
363 		status = hermon_port_query(state, port, &info_p[indx]);
364 		if (status != DDI_SUCCESS) {
365 			return (status);
366 		}
367 	}
368 	return (IBT_SUCCESS);
369 }
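/*
 * Caller-side sketch (illustrative, not from this file): to query every
 * port, the IBTF passes query_port == 0 and must supply an info_p array
 * with one ibt_hca_portinfo_t entry per port (dual-port HCA assumed here):
 *
 *	ibt_hca_portinfo_t	info[2];
 *	ibt_status_t		status;
 *
 *	status = hermon_ci_query_hca_ports(hca, 0, info);
 */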
370 
371 
372 /*
373  * hermon_ci_modify_ports()
374  *    Modify HCA port attributes
375  *    Context: Can be called only from user or kernel context.
376  */
377 static ibt_status_t
378 hermon_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
379     ibt_port_modify_flags_t flags, uint8_t init_type)
380 {
381 	hermon_state_t	*state;
382 	int		status;
383 
384 	/* Check for valid HCA handle */
385 	if (hca == NULL) {
386 		return (IBT_HCA_HDL_INVALID);
387 	}
388 
389 	/* Grab the Hermon softstate pointer */
390 	state = (hermon_state_t *)hca;
391 
392 	/* Modify the port(s) */
393 	status = hermon_port_modify(state, port, flags, init_type);
394 	return (status);
395 }
396 
397 /*
398  * hermon_ci_modify_system_image()
399  *    Modify the System Image GUID
400  *    Context: Can be called only from user or kernel context.
401  */
402 /* ARGSUSED */
403 static ibt_status_t
404 hermon_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
405 {
406 	/*
407 	 * This is an unsupported interface for the Hermon driver.  This
408 	 * interface is necessary to support modification of the System
409 	 * Image GUID.  Hermon is only capable of modifying this parameter
410 	 * once (during driver initialization).
411 	 */
412 	return (IBT_NOT_SUPPORTED);
413 }
414 
415 /*
416  * hermon_ci_alloc_pd()
417  *    Allocate a Protection Domain
418  *    Context: Can be called only from user or kernel context.
419  */
420 /* ARGSUSED */
421 static ibt_status_t
422 hermon_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
423 {
424 	hermon_state_t	*state;
425 	hermon_pdhdl_t	pdhdl;
426 	int		status;
427 
428 	ASSERT(pd_p != NULL);
429 
430 	/* Check for valid HCA handle */
431 	if (hca == NULL) {
432 		return (IBT_HCA_HDL_INVALID);
433 	}
434 
435 	/* Grab the Hermon softstate pointer */
436 	state = (hermon_state_t *)hca;
437 
438 	/* Allocate the PD */
439 	status = hermon_pd_alloc(state, &pdhdl, HERMON_NOSLEEP);
440 	if (status != DDI_SUCCESS) {
441 		return (status);
442 	}
443 
444 	/* Return the Hermon PD handle */
445 	*pd_p = (ibc_pd_hdl_t)pdhdl;
446 
447 	return (IBT_SUCCESS);
448 }
449 
450 
451 /*
452  * hermon_ci_free_pd()
453  *    Free a Protection Domain
454  *    Context: Can be called only from user or kernel context
455  */
456 static ibt_status_t
457 hermon_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
458 {
459 	hermon_state_t		*state;
460 	hermon_pdhdl_t		pdhdl;
461 	int			status;
462 
463 	/* Check for valid HCA handle */
464 	if (hca == NULL) {
465 		return (IBT_HCA_HDL_INVALID);
466 	}
467 
468 	/* Check for valid PD handle pointer */
469 	if (pd == NULL) {
470 		return (IBT_PD_HDL_INVALID);
471 	}
472 
473 	/* Grab the Hermon softstate pointer and PD handle */
474 	state = (hermon_state_t *)hca;
475 	pdhdl = (hermon_pdhdl_t)pd;
476 
477 	/* Free the PD */
478 	status = hermon_pd_free(state, &pdhdl);
479 	return (status);
480 }
481 
482 
483 /*
484  * hermon_ci_alloc_rdd()
485  *    Allocate a Reliable Datagram Domain
486  *    Context: Can be called only from user or kernel context.
487  */
488 /* ARGSUSED */
489 static ibt_status_t
490 hermon_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
491     ibc_rdd_hdl_t *rdd_p)
492 {
493 	/*
494 	 * This is an unsupported interface for the Hermon driver.  This
495 	 * interface is necessary to support Reliable Datagram (RD)
496 	 * operations.  Hermon does not support RD.
497 	 */
498 	return (IBT_NOT_SUPPORTED);
499 }
500 
501 
502 /*
503  * hermon_ci_free_rdd()
504  *    Free a Reliable Datagram Domain
505  *    Context: Can be called only from user or kernel context.
506  */
507 /* ARGSUSED */
508 static ibt_status_t
509 hermon_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
510 {
511 	/*
512 	 * This is an unsupported interface for the Hermon driver.  This
513 	 * interface is necessary to support Reliable Datagram (RD)
514 	 * operations.  Hermon does not support RD.
515 	 */
516 	return (IBT_NOT_SUPPORTED);
517 }
518 
519 
520 /*
521  * hermon_ci_alloc_ah()
522  *    Allocate an Address Handle
523  *    Context: Can be called only from user or kernel context.
524  */
525 /* ARGSUSED */
526 static ibt_status_t
527 hermon_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
528     ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
529 {
530 	hermon_state_t	*state;
531 	hermon_ahhdl_t	ahhdl;
532 	hermon_pdhdl_t	pdhdl;
533 	int		status;
534 
535 	/* Check for valid HCA handle */
536 	if (hca == NULL) {
537 		return (IBT_HCA_HDL_INVALID);
538 	}
539 
540 	/* Check for valid PD handle pointer */
541 	if (pd == NULL) {
542 		return (IBT_PD_HDL_INVALID);
543 	}
544 
545 	/* Grab the Hermon softstate pointer and PD handle */
546 	state = (hermon_state_t *)hca;
547 	pdhdl = (hermon_pdhdl_t)pd;
548 
549 	/* Allocate the AH */
550 	status = hermon_ah_alloc(state, pdhdl, attr_p, &ahhdl, HERMON_NOSLEEP);
551 	if (status != DDI_SUCCESS) {
552 		return (status);
553 	}
554 
555 	/* Return the Hermon AH handle */
556 	*ah_p = (ibc_ah_hdl_t)ahhdl;
557 
558 	return (IBT_SUCCESS);
559 }
560 
561 
562 /*
563  * hermon_ci_free_ah()
564  *    Free an Address Handle
565  *    Context: Can be called only from user or kernel context.
566  */
567 static ibt_status_t
568 hermon_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
569 {
570 	hermon_state_t	*state;
571 	hermon_ahhdl_t	ahhdl;
572 	int		status;
573 
574 	/* Check for valid HCA handle */
575 	if (hca == NULL) {
576 		return (IBT_HCA_HDL_INVALID);
577 	}
578 
579 	/* Check for valid address handle pointer */
580 	if (ah == NULL) {
581 		return (IBT_AH_HDL_INVALID);
582 	}
583 
584 	/* Grab the Hermon softstate pointer and AH handle */
585 	state = (hermon_state_t *)hca;
586 	ahhdl = (hermon_ahhdl_t)ah;
587 
588 	/* Free the AH */
589 	status = hermon_ah_free(state, &ahhdl, HERMON_NOSLEEP);
590 
591 	return (status);
592 }
593 
594 
595 /*
596  * hermon_ci_query_ah()
597  *    Return the Address Vector information for a specified Address Handle
598  *    Context: Can be called from interrupt or base context.
599  */
600 static ibt_status_t
601 hermon_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
602     ibt_adds_vect_t *attr_p)
603 {
604 	hermon_state_t	*state;
605 	hermon_ahhdl_t	ahhdl;
606 	hermon_pdhdl_t	pdhdl;
607 	int		status;
608 
609 	/* Check for valid HCA handle */
610 	if (hca == NULL) {
611 		return (IBT_HCA_HDL_INVALID);
612 	}
613 
614 	/* Check for valid address handle pointer */
615 	if (ah == NULL) {
616 		return (IBT_AH_HDL_INVALID);
617 	}
618 
619 	/* Grab the Hermon softstate pointer and AH handle */
620 	state = (hermon_state_t *)hca;
621 	ahhdl = (hermon_ahhdl_t)ah;
622 
623 	/* Query the AH */
624 	status = hermon_ah_query(state, ahhdl, &pdhdl, attr_p);
625 	if (status != DDI_SUCCESS) {
626 		return (status);
627 	}
628 
629 	/* Return the Hermon PD handle */
630 	*pd_p = (ibc_pd_hdl_t)pdhdl;
631 
632 	return (IBT_SUCCESS);
633 }
634 
635 
636 /*
637  * hermon_ci_modify_ah()
638  *    Modify the Address Vector information of a specified Address Handle
639  *    Context: Can be called from interrupt or base context.
640  */
641 static ibt_status_t
642 hermon_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
643 {
644 	hermon_state_t	*state;
645 	hermon_ahhdl_t	ahhdl;
646 	int		status;
647 
648 	/* Check for valid HCA handle */
649 	if (hca == NULL) {
650 		return (IBT_HCA_HDL_INVALID);
651 	}
652 
653 	/* Check for valid address handle pointer */
654 	if (ah == NULL) {
655 		return (IBT_AH_HDL_INVALID);
656 	}
657 
658 	/* Grab the Hermon softstate pointer and AH handle */
659 	state = (hermon_state_t *)hca;
660 	ahhdl = (hermon_ahhdl_t)ah;
661 
662 	/* Modify the AH */
663 	status = hermon_ah_modify(state, ahhdl, attr_p);
664 
665 	return (status);
666 }
667 
668 
669 /*
670  * hermon_ci_alloc_qp()
671  *    Allocate a Queue Pair
672  *    Context: Can be called only from user or kernel context.
673  */
674 static ibt_status_t
675 hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
676     ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
677     ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
678 {
679 	hermon_state_t		*state;
680 	hermon_qp_info_t		qpinfo;
681 	int			status;
682 
683 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
684 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
685 
686 	/* Check for valid HCA handle */
687 	if (hca == NULL) {
688 		return (IBT_HCA_HDL_INVALID);
689 	}
690 
691 	/* Grab the Hermon softstate pointer */
692 	state = (hermon_state_t *)hca;
693 
694 	/* Allocate the QP */
695 	qpinfo.qpi_attrp	= attr_p;
696 	qpinfo.qpi_type		= type;
697 	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
698 	qpinfo.qpi_queueszp	= queue_sizes_p;
699 	qpinfo.qpi_qpn		= qpn;
700 	status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
701 	if (status != DDI_SUCCESS) {
702 		return (status);
703 	}
704 
705 	/* Return the Hermon QP handle */
706 	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
707 
708 	return (IBT_SUCCESS);
709 }
710 
711 
712 /*
713  * hermon_ci_alloc_special_qp()
714  *    Allocate a Special Queue Pair
715  *    Context: Can be called only from user or kernel context.
716  */
717 static ibt_status_t
718 hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
719     ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
720     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
721     ibc_qp_hdl_t *qp_p)
722 {
723 	hermon_state_t		*state;
724 	hermon_qp_info_t		qpinfo;
725 	int			status;
726 
727 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
728 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
729 
730 	/* Check for valid HCA handle */
731 	if (hca == NULL) {
732 		return (IBT_HCA_HDL_INVALID);
733 	}
734 
735 	/* Grab the Hermon softstate pointer */
736 	state = (hermon_state_t *)hca;
737 
738 	/* Allocate the Special QP */
739 	qpinfo.qpi_attrp	= attr_p;
740 	qpinfo.qpi_type		= type;
741 	qpinfo.qpi_port		= port;
742 	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
743 	qpinfo.qpi_queueszp	= queue_sizes_p;
744 	status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
745 	if (status != DDI_SUCCESS) {
746 		return (status);
747 	}
748 	/* Return the Hermon QP handle */
749 	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
750 
751 	return (IBT_SUCCESS);
752 }
753 
754 /* ARGSUSED */
755 static ibt_status_t
756 hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
757     ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
758     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
759     ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
760     ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
761 {
762 	return (IBT_NOT_SUPPORTED);
763 }
764 
765 /*
766  * hermon_ci_free_qp()
767  *    Free a Queue Pair
768  *    Context: Can be called only from user or kernel context.
769  */
770 static ibt_status_t
771 hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
772     ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
773 {
774 	hermon_state_t	*state;
775 	hermon_qphdl_t	qphdl;
776 	int		status;
777 
778 	/* Check for valid HCA handle */
779 	if (hca == NULL) {
780 		return (IBT_HCA_HDL_INVALID);
781 	}
782 
783 	/* Check for valid QP handle pointer */
784 	if (qp == NULL) {
785 		return (IBT_QP_HDL_INVALID);
786 	}
787 
788 	/* Grab the Hermon softstate pointer and QP handle */
789 	state = (hermon_state_t *)hca;
790 	qphdl = (hermon_qphdl_t)qp;
791 
792 	/* Free the QP */
793 	status = hermon_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
794 	    HERMON_NOSLEEP);
795 
796 	return (status);
797 }
798 
799 
800 /*
801  * hermon_ci_release_qpn()
802  *    Release a Queue Pair Number (QPN)
803  *    Context: Can be called only from user or kernel context.
804  */
805 static ibt_status_t
806 hermon_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
807 {
808 	hermon_state_t		*state;
809 	hermon_qpn_entry_t	*entry;
810 
811 	/* Check for valid HCA handle */
812 	if (hca == NULL) {
813 		return (IBT_HCA_HDL_INVALID);
814 	}
815 
816 	/* Check for valid QP handle pointer */
817 	if (qpnh == NULL) {
818 		return (IBT_QP_HDL_INVALID);
819 	}
820 
821 	/* Grab the Hermon softstate pointer and QP handle */
822 	state = (hermon_state_t *)hca;
823 	entry = (hermon_qpn_entry_t *)qpnh;
824 
825 	/* Release the QP number */
826 	hermon_qp_release_qpn(state, entry, HERMON_QPN_RELEASE);
827 
828 	return (IBT_SUCCESS);
829 }
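/*
 * Two-step teardown sketch (illustrative, not from this file): a client
 * that wants to keep a QPN from immediate reuse frees the QP but holds the
 * number, then releases it later (flag name per <sys/ib/ibtl/impl/ibc.h>):
 *
 *	ibc_qpn_hdl_t	qpnh;
 *
 *	status = hermon_ci_free_qp(hca, qp, IBC_FREE_QP_ONLY_QPN, &qpnh);
 *	... time passes; the QPN stays reserved ...
 *	status = hermon_ci_release_qpn(hca, qpnh);
 */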
830 
831 
832 /*
833  * hermon_ci_query_qp()
834  *    Query a Queue Pair
835  *    Context: Can be called from interrupt or base context.
836  */
837 static ibt_status_t
838 hermon_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
839     ibt_qp_query_attr_t *attr_p)
840 {
841 	hermon_state_t	*state;
842 	hermon_qphdl_t	qphdl;
843 	int		status;
844 
845 	/* Check for valid HCA handle */
846 	if (hca == NULL) {
847 		return (IBT_HCA_HDL_INVALID);
848 	}
849 
850 	/* Check for valid QP handle */
851 	if (qp == NULL) {
852 		return (IBT_QP_HDL_INVALID);
853 	}
854 
855 	/* Grab the Hermon softstate pointer and QP handle */
856 	state = (hermon_state_t *)hca;
857 	qphdl = (hermon_qphdl_t)qp;
858 
859 	/* Query the QP */
860 	status = hermon_qp_query(state, qphdl, attr_p);
861 	return (status);
862 }
863 
864 
865 /*
866  * hermon_ci_modify_qp()
867  *    Modify a Queue Pair
868  *    Context: Can be called from interrupt or base context.
869  */
870 static ibt_status_t
871 hermon_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
872     ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
873     ibt_queue_sizes_t *actual_sz)
874 {
875 	hermon_state_t	*state;
876 	hermon_qphdl_t	qphdl;
877 	int		status;
878 
879 	/* Check for valid HCA handle */
880 	if (hca == NULL) {
881 		return (IBT_HCA_HDL_INVALID);
882 	}
883 
884 	/* Check for valid QP handle */
885 	if (qp == NULL) {
886 		return (IBT_QP_HDL_INVALID);
887 	}
888 
889 	/* Grab the Hermon softstate pointer and QP handle */
890 	state = (hermon_state_t *)hca;
891 	qphdl = (hermon_qphdl_t)qp;
892 
893 	/* Modify the QP */
894 	status = hermon_qp_modify(state, qphdl, flags, info_p, actual_sz);
895 	return (status);
896 }
897 
898 
899 /*
900  * hermon_ci_alloc_cq()
901  *    Allocate a Completion Queue
902  *    Context: Can be called only from user or kernel context.
903  */
904 /* ARGSUSED */
905 static ibt_status_t
906 hermon_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
907     ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
908 {
909 	hermon_state_t	*state;
910 	hermon_cqhdl_t	cqhdl;
911 	int		status;
912 
913 	/* Check for valid HCA handle */
914 	if (hca == NULL) {
915 		return (IBT_HCA_HDL_INVALID);
916 	}
917 	/* Grab the Hermon softstate pointer */
918 	state = (hermon_state_t *)hca;
919 
920 
921 	/* Allocate the CQ */
922 	status = hermon_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
923 	    &cqhdl, HERMON_NOSLEEP);
924 	if (status != DDI_SUCCESS) {
925 		return (status);
926 	}
927 
928 	/* Return the Hermon CQ handle */
929 	*cq_p = (ibc_cq_hdl_t)cqhdl;
930 
931 	return (IBT_SUCCESS);
932 }
933 
934 
935 /*
936  * hermon_ci_free_cq()
937  *    Free a Completion Queue
938  *    Context: Can be called only from user or kernel context.
939  */
940 static ibt_status_t
941 hermon_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
942 {
943 	hermon_state_t	*state;
944 	hermon_cqhdl_t	cqhdl;
945 	int		status;
946 
947 	/* Check for valid HCA handle */
948 	if (hca == NULL) {
949 		return (IBT_HCA_HDL_INVALID);
950 	}
951 
952 	/* Check for valid CQ handle pointer */
953 	if (cq == NULL) {
954 		return (IBT_CQ_HDL_INVALID);
955 	}
956 
957 	/* Grab the Hermon softstate pointer and CQ handle */
958 	state = (hermon_state_t *)hca;
959 	cqhdl = (hermon_cqhdl_t)cq;
960 
961 
962 	/* Free the CQ */
963 	status = hermon_cq_free(state, &cqhdl, HERMON_NOSLEEP);
964 	return (status);
965 }
966 
967 
968 /*
969  * hermon_ci_query_cq()
970  *    Return the size of a Completion Queue
971  *    Context: Can be called only from user or kernel context.
972  */
973 static ibt_status_t
974 hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
975     uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
976 {
977 	hermon_cqhdl_t	cqhdl;
978 
979 	/* Check for valid HCA handle */
980 	if (hca == NULL) {
981 		return (IBT_HCA_HDL_INVALID);
982 	}
983 
984 	/* Check for valid CQ handle pointer */
985 	if (cq == NULL) {
986 		return (IBT_CQ_HDL_INVALID);
987 	}
988 
989 	/* Grab the CQ handle */
990 	cqhdl = (hermon_cqhdl_t)cq;
991 
992 	/* Query the current CQ size */
993 	*entries_p = cqhdl->cq_bufsz;
994 	*count_p = cqhdl->cq_intmod_count;
995 	*usec_p = cqhdl->cq_intmod_usec;
996 	*hid_p = 0;
997 
998 	return (IBT_SUCCESS);
999 }
1000 
1001 
1002 /*
1003  * hermon_ci_resize_cq()
1004  *    Change the size of a Completion Queue
1005  *    Context: Can be called only from user or kernel context.
1006  */
1007 static ibt_status_t
1008 hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
1009     uint_t *actual_size)
1010 {
1011 	hermon_state_t		*state;
1012 	hermon_cqhdl_t		cqhdl;
1013 	int			status;
1014 
1015 	/* Check for valid HCA handle */
1016 	if (hca == NULL) {
1017 		return (IBT_HCA_HDL_INVALID);
1018 	}
1019 
1020 	/* Check for valid CQ handle pointer */
1021 	if (cq == NULL) {
1022 		return (IBT_CQ_HDL_INVALID);
1023 	}
1024 
1025 	/* Grab the Hermon softstate pointer and CQ handle */
1026 	state = (hermon_state_t *)hca;
1027 	cqhdl = (hermon_cqhdl_t)cq;
1028 
1029 	/* Resize the CQ */
1030 	status = hermon_cq_resize(state, cqhdl, size, actual_size,
1031 	    HERMON_NOSLEEP);
1032 	if (status != DDI_SUCCESS) {
1033 		return (status);
1034 	}
1035 	return (IBT_SUCCESS);
1036 }
1037 
1038 /*
1039  * hermon_ci_modify_cq()
1040  *    Change the interrupt moderation values of a Completion Queue
1041  *    Context: Can be called only from user or kernel context.
1042  */
1043 static ibt_status_t
1044 hermon_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t count,
1045     uint_t usec, ibt_cq_handler_id_t hid)
1046 {
1047 	hermon_state_t		*state;
1048 	hermon_cqhdl_t		cqhdl;
1049 	int			status;
1050 
1051 	/* Check for valid HCA handle */
1052 	if (hca == NULL) {
1053 		return (IBT_HCA_HDL_INVALID);
1054 	}
1055 
1056 	/* Check for valid CQ handle pointer */
1057 	if (cq == NULL) {
1058 		return (IBT_CQ_HDL_INVALID);
1059 	}
1060 
1061 	/* Grab the Hermon softstate pointer and CQ handle */
1062 	state = (hermon_state_t *)hca;
1063 	cqhdl = (hermon_cqhdl_t)cq;
1064 
1065 	/* Modify the CQ */
1066 	status = hermon_cq_modify(state, cqhdl, count, usec, hid,
1067 	    HERMON_NOSLEEP);
1068 	return (status);
1069 }
1070 
1071 
1072 /*
1073  * hermon_ci_alloc_cq_sched()
1074  *    Reserve a CQ scheduling class resource
1075  *    Context: Can be called only from user or kernel context.
1076  */
1077 /* ARGSUSED */
1078 static ibt_status_t
1079 hermon_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_flags_t flags,
1080     ibc_cq_handler_attr_t *handler_attr_p)
1081 {
1082 	if (hca == NULL) {
1083 		return (IBT_HCA_HDL_INVALID);
1084 	}
1085 
1086 	/*
1087 	 * This is an unsupported interface for the Hermon driver.  Hermon
1088 	 * does not support CQ scheduling classes.
1089 	 */
1090 
1091 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p))
1092 	handler_attr_p->h_id = NULL;
1093 	handler_attr_p->h_pri = 0;
1094 	handler_attr_p->h_bind = NULL;
1095 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*handler_attr_p))
1096 	return (IBT_SUCCESS);
1097 }
1098 
1099 
1100 /*
1101  * hermon_ci_free_cq_sched()
1102  *    Free a CQ scheduling class resource
1103  *    Context: Can be called only from user or kernel context.
1104  */
1105 static ibt_status_t
1106 hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibt_cq_handler_id_t handler_id)
1107 {
1108 	if (hca == NULL) {
1109 		return (IBT_HCA_HDL_INVALID);
1110 	}
1111 
1112 	/*
1113 	 * This is an unsupported interface for the Hermon driver.  Hermon
1114 	 * does not support CQ scheduling classes.  alloc_cq_sched() hands
1115 	 * back a NULL handler ID to mark this unsupported, so we expect to
1116 	 * see that NULL passed back here; we warn if not, but never fail.
1117 	 */
1118 	if (handler_id != NULL) {
1119 		cmn_err(CE_NOTE, "hermon_ci_free_cq_sched: unexpected "
1120 		    "non-NULL handler_id\n");
1121 	}
1122 	return (IBT_SUCCESS);
1123 }
1124 
1125 
1126 /*
1127  * hermon_ci_alloc_eec()
1128  *    Allocate an End-to-End context
1129  *    Context: Can be called only from user or kernel context.
1130  */
1131 /* ARGSUSED */
1132 static ibt_status_t
1133 hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
1134     ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
1135 {
1136 	/*
1137 	 * This is an unsupported interface for the Hermon driver.  This
1138 	 * interface is necessary to support Reliable Datagram (RD)
1139 	 * operations.  Hermon does not support RD.
1140 	 */
1141 	return (IBT_NOT_SUPPORTED);
1142 }
1143 
1144 
1145 /*
1146  * hermon_ci_free_eec()
1147  *    Free an End-to-End context
1148  *    Context: Can be called only from user or kernel context.
1149  */
1150 /* ARGSUSED */
1151 static ibt_status_t
1152 hermon_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
1153 {
1154 	/*
1155 	 * This is an unsupported interface for the Hermon driver.  This
1156 	 * interface is necessary to support Reliable Datagram (RD)
1157 	 * operations.  Hermon does not support RD.
1158 	 */
1159 	return (IBT_NOT_SUPPORTED);
1160 }
1161 
1162 
1163 /*
1164  * hermon_ci_query_eec()
1165  *    Query an End-to-End context
1166  *    Context: Can be called from interrupt or base context.
1167  */
1168 /* ARGSUSED */
1169 static ibt_status_t
1170 hermon_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1171     ibt_eec_query_attr_t *attr_p)
1172 {
1173 	/*
1174 	 * This is an unsupported interface for the Hermon driver.  This
1175 	 * interface is necessary to support Reliable Datagram (RD)
1176 	 * operations.  Hermon does not support RD.
1177 	 */
1178 	return (IBT_NOT_SUPPORTED);
1179 }
1180 
1181 
1182 /*
1183  * hermon_ci_modify_eec()
1184  *    Modify an End-to-End context
1185  *    Context: Can be called from interrupt or base context.
1186  */
1187 /* ARGSUSED */
1188 static ibt_status_t
1189 hermon_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1190     ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
1191 {
1192 	/*
1193 	 * This is an unsupported interface for the Hermon driver.  This
1194 	 * interface is necessary to support Reliable Datagram (RD)
1195 	 * operations.  Hermon does not support RD.
1196 	 */
1197 	return (IBT_NOT_SUPPORTED);
1198 }
1199 
1200 
1201 /*
1202  * hermon_ci_register_mr()
1203  *    Prepare a virtually addressed Memory Region for use by an HCA
1204  *    Context: Can be called from interrupt or base context.
1205  */
1206 /* ARGSUSED */
1207 static ibt_status_t
1208 hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1209     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1210     ibt_mr_desc_t *mr_desc)
1211 {
1212 	hermon_mr_options_t	op;
1213 	hermon_state_t		*state;
1214 	hermon_pdhdl_t		pdhdl;
1215 	hermon_mrhdl_t		mrhdl;
1216 	int			status;
1217 
1218 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1219 
1220 	ASSERT(mr_attr != NULL);
1221 	ASSERT(mr_p != NULL);
1222 	ASSERT(mr_desc != NULL);
1223 
1224 	/* Check for valid HCA handle */
1225 	if (hca == NULL) {
1226 		return (IBT_HCA_HDL_INVALID);
1227 	}
1228 
1229 	/* Check for valid PD handle pointer */
1230 	if (pd == NULL) {
1231 		return (IBT_PD_HDL_INVALID);
1232 	}
1233 
1234 	/*
1235 	 * Validate the access flags.  Both Remote Write and Remote Atomic
1236 	 * require the Local Write flag to be set
1237 	 */
1238 	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1239 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1240 	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1241 		return (IBT_MR_ACCESS_REQ_INVALID);
1242 	}
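	/*
	 * For example (illustrative only): mr_flags carrying just
	 * IBT_MR_ENABLE_REMOTE_WRITE fails the check above; the caller
	 * must pass (IBT_MR_ENABLE_LOCAL_WRITE | IBT_MR_ENABLE_REMOTE_WRITE).
	 */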
1243 
1244 	/* Grab the Hermon softstate pointer and PD handle */
1245 	state = (hermon_state_t *)hca;
1246 	pdhdl = (hermon_pdhdl_t)pd;
1247 
1248 	/* Register the memory region */
1249 	op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
1250 	op.mro_bind_dmahdl = NULL;
1251 	op.mro_bind_override_addr = 0;
1252 	status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
1253 	    &op, HERMON_MPT_DMPT);
1254 	if (status != DDI_SUCCESS) {
1255 		return (status);
1256 	}
1257 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1258 
1259 	/* Fill in the mr_desc structure */
1260 	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1261 	mr_desc->md_lkey  = mrhdl->mr_lkey;
1262 	/* Only set RKey if remote access was requested */
1263 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1264 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1265 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1266 		mr_desc->md_rkey = mrhdl->mr_rkey;
1267 	}
1268 
1269 	/*
1270 	 * If the region is mapped for streaming (i.e. noncoherent), then
1271 	 * report that a sync is required
1272 	 */
1273 	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1274 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1275 
1276 	/* Return the Hermon MR handle */
1277 	*mr_p = (ibc_mr_hdl_t)mrhdl;
1278 
1279 	return (IBT_SUCCESS);
1280 }
1281 
1282 
1283 /*
1284  * hermon_ci_register_buf()
1285  *    Prepare a Memory Region specified by buf structure for use by an HCA
1286  *    Context: Can be called from interrupt or base context.
1287  */
1288 /* ARGSUSED */
1289 static ibt_status_t
1290 hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1291     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1292     ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1293 {
1294 	hermon_mr_options_t	op;
1295 	hermon_state_t		*state;
1296 	hermon_pdhdl_t		pdhdl;
1297 	hermon_mrhdl_t		mrhdl;
1298 	int			status;
1299 	ibt_mr_flags_t		flags = attrp->mr_flags;
1300 
1301 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1302 
1303 	ASSERT(mr_p != NULL);
1304 	ASSERT(mr_desc != NULL);
1305 
1306 	/* Check for valid HCA handle */
1307 	if (hca == NULL) {
1308 		return (IBT_HCA_HDL_INVALID);
1309 	}
1310 
1311 	/* Check for valid PD handle pointer */
1312 	if (pd == NULL) {
1313 		return (IBT_PD_HDL_INVALID);
1314 	}
1315 
1316 	/*
1317 	 * Validate the access flags.  Both Remote Write and Remote Atomic
1318 	 * require the Local Write flag to be set
1319 	 */
1320 	if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1321 	    (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1322 	    !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1323 		return (IBT_MR_ACCESS_REQ_INVALID);
1324 	}
1325 
1326 	/* Grab the Hermon softstate pointer and PD handle */
1327 	state = (hermon_state_t *)hca;
1328 	pdhdl = (hermon_pdhdl_t)pd;
1329 
1330 	/* Register the memory region */
1331 	op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
1332 	op.mro_bind_dmahdl = NULL;
1333 	op.mro_bind_override_addr = 0;
1334 	status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
1335 	    &mrhdl, &op, HERMON_MPT_DMPT);
1336 	if (status != DDI_SUCCESS) {
1337 		return (status);
1338 	}
1339 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1340 
1341 	/* Fill in the mr_desc structure */
1342 	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1343 	mr_desc->md_lkey  = mrhdl->mr_lkey;
1344 	/* Only set RKey if remote access was requested */
1345 	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1346 	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1347 	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1348 		mr_desc->md_rkey = mrhdl->mr_rkey;
1349 	}
1350 
1351 	/*
1352 	 * If the region is mapped for streaming (i.e. noncoherent), then
1353 	 * report that a sync is required
1354 	 */
1355 	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1356 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1357 
1358 	/* Return the Hermon MR handle */
1359 	*mr_p = (ibc_mr_hdl_t)mrhdl;
1360 
1361 	return (IBT_SUCCESS);
1362 }
1363 
1364 
1365 /*
1366  * hermon_ci_deregister_mr()
1367  *    Deregister a Memory Region from an HCA translation table
1368  *    Context: Can be called only from user or kernel context.
1369  */
1370 static ibt_status_t
1371 hermon_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
1372 {
1373 	hermon_state_t		*state;
1374 	hermon_mrhdl_t		mrhdl;
1375 	int			status;
1376 
1377 	/* Check for valid HCA handle */
1378 	if (hca == NULL) {
1379 		return (IBT_HCA_HDL_INVALID);
1380 	}
1381 
1382 	/* Check for valid memory region handle */
1383 	if (mr == NULL) {
1384 		return (IBT_MR_HDL_INVALID);
1385 	}
1386 
1387 	/* Grab the Hermon softstate pointer */
1388 	state = (hermon_state_t *)hca;
1389 	mrhdl = (hermon_mrhdl_t)mr;
1390 
1391 	/*
1392 	 * Deregister the memory region.
1393 	 */
1394 	status = hermon_mr_deregister(state, &mrhdl, HERMON_MR_DEREG_ALL,
1395 	    HERMON_NOSLEEP);
1396 	return (status);
1397 }
1398 
1399 
1400 /*
1401  * hermon_ci_query_mr()
1402  *    Retrieve information about a specified Memory Region
1403  *    Context: Can be called from interrupt or base context.
1404  */
1405 static ibt_status_t
1406 hermon_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1407     ibt_mr_query_attr_t *mr_attr)
1408 {
1409 	hermon_state_t		*state;
1410 	hermon_mrhdl_t		mrhdl;
1411 	int			status;
1412 
1413 	ASSERT(mr_attr != NULL);
1414 
1415 	/* Check for valid HCA handle */
1416 	if (hca == NULL) {
1417 		return (IBT_HCA_HDL_INVALID);
1418 	}
1419 
1420 	/* Check for valid memory region handle */
1421 	if (mr == NULL) {
1422 		return (IBT_MR_HDL_INVALID);
1423 	}
1424 
1425 	/* Grab the Hermon softstate pointer and MR handle */
1426 	state = (hermon_state_t *)hca;
1427 	mrhdl = (hermon_mrhdl_t)mr;
1428 
1429 	/* Query the memory region */
1430 	status = hermon_mr_query(state, mrhdl, mr_attr);
1431 	return (status);
1432 }
1433 
1434 
1435 /*
1436  * hermon_ci_register_shared_mr()
1437  *    Create a shared memory region matching an existing Memory Region
1438  *    Context: Can be called from interrupt or base context.
1439  */
1440 /* ARGSUSED */
1441 static ibt_status_t
1442 hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1443     ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1444     ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1445 {
1446 	hermon_state_t		*state;
1447 	hermon_pdhdl_t		pdhdl;
1448 	hermon_mrhdl_t		mrhdl, mrhdl_new;
1449 	int			status;
1450 
1451 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1452 
1453 	ASSERT(mr_attr != NULL);
1454 	ASSERT(mr_p != NULL);
1455 	ASSERT(mr_desc != NULL);
1456 
1457 	/* Check for valid HCA handle */
1458 	if (hca == NULL) {
1459 		return (IBT_HCA_HDL_INVALID);
1460 	}
1461 
1462 	/* Check for valid PD handle pointer */
1463 	if (pd == NULL) {
1464 		return (IBT_PD_HDL_INVALID);
1465 	}
1466 
1467 	/* Check for valid memory region handle */
1468 	if (mr == NULL) {
1469 		return (IBT_MR_HDL_INVALID);
1470 	}
1471 	/*
1472 	 * Validate the access flags.  Both Remote Write and Remote Atomic
1473 	 * require the Local Write flag to be set
1474 	 */
1475 	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1476 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1477 	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1478 		return (IBT_MR_ACCESS_REQ_INVALID);
1479 	}
1480 
1481 	/* Grab the Hermon softstate pointer and handles */
1482 	state = (hermon_state_t *)hca;
1483 	pdhdl = (hermon_pdhdl_t)pd;
1484 	mrhdl = (hermon_mrhdl_t)mr;
1485 
1486 	/* Register the shared memory region */
1487 	status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1488 	    &mrhdl_new);
1489 	if (status != DDI_SUCCESS) {
1490 		return (status);
1491 	}
1492 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1493 
1494 	/* Fill in the mr_desc structure */
1495 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1496 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1497 	/* Only set RKey if remote access was requested */
1498 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1499 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1500 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1501 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
1502 	}
1503 
1504 	/*
1505 	 * If the shared region is mapped for streaming (i.e. noncoherent),
1506 	 * then report that a sync is required
1507 	 */
1508 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1509 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1510 
1511 	/* Return the Hermon MR handle */
1512 	*mr_p = (ibc_mr_hdl_t)mrhdl_new;
1513 
1514 	return (IBT_SUCCESS);
1515 }
1516 
1517 
1518 /*
1519  * hermon_ci_reregister_mr()
1520  *    Modify the attributes of an existing Memory Region
1521  *    Context: Can be called from interrupt or base context.
1522  */
1523 /* ARGSUSED */
1524 static ibt_status_t
1525 hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1526     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1527     ibt_mr_desc_t *mr_desc)
1528 {
1529 	hermon_mr_options_t	op;
1530 	hermon_state_t		*state;
1531 	hermon_pdhdl_t		pdhdl;
1532 	hermon_mrhdl_t		mrhdl, mrhdl_new;
1533 	int			status;
1534 
1535 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1536 
1537 	ASSERT(mr_attr != NULL);
1538 	ASSERT(mr_new != NULL);
1539 	ASSERT(mr_desc != NULL);
1540 
1541 	/* Check for valid HCA handle */
1542 	if (hca == NULL) {
1543 		return (IBT_HCA_HDL_INVALID);
1544 	}
1545 
1546 	/* Check for valid memory region handle */
1547 	if (mr == NULL) {
1548 		return (IBT_MR_HDL_INVALID);
1549 	}
1550 
1551 	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1552 	state = (hermon_state_t *)hca;
1553 	mrhdl = (hermon_mrhdl_t)mr;
1554 	pdhdl = (hermon_pdhdl_t)pd;
1555 
1556 	/* Reregister the memory region */
1557 	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1558 	status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1559 	    &mrhdl_new, &op);
1560 	if (status != DDI_SUCCESS) {
1561 		return (status);
1562 	}
1563 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1564 
1565 	/* Fill in the mr_desc structure */
1566 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1567 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1568 	/* Only set RKey if remote access was requested */
1569 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1570 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1571 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1572 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
1573 	}
1574 
1575 	/*
1576 	 * If the region is mapped for streaming (i.e. noncoherent), then
1577 	 * report that a sync is required
1578 	 */
1579 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1580 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1581 
1582 	/* Return the Hermon MR handle */
1583 	*mr_new = (ibc_mr_hdl_t)mrhdl_new;
1584 
1585 	return (IBT_SUCCESS);
1586 }
1587 
1588 
1589 /*
1590  * hermon_ci_reregister_buf()
1591  *    Modify the attributes of an existing Memory Region
1592  *    Context: Can be called from interrupt or base context.
1593  */
1594 /* ARGSUSED */
1595 static ibt_status_t
1596 hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1597     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1598     ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1599 {
1600 	hermon_mr_options_t	op;
1601 	hermon_state_t		*state;
1602 	hermon_pdhdl_t		pdhdl;
1603 	hermon_mrhdl_t		mrhdl, mrhdl_new;
1604 	int			status;
1605 	ibt_mr_flags_t		flags = attrp->mr_flags;
1606 
1607 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1608 
1609 	ASSERT(mr_new != NULL);
1610 	ASSERT(mr_desc != NULL);
1611 
1612 	/* Check for valid HCA handle */
1613 	if (hca == NULL) {
1614 		return (IBT_HCA_HDL_INVALID);
1615 	}
1616 
1617 	/* Check for valid memory region handle */
1618 	if (mr == NULL) {
1619 		return (IBT_MR_HDL_INVALID);
1620 	}
1621 
1622 	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1623 	state = (hermon_state_t *)hca;
1624 	mrhdl = (hermon_mrhdl_t)mr;
1625 	pdhdl = (hermon_pdhdl_t)pd;
1626 
1627 	/* Reregister the memory region */
1628 	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1629 	status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1630 	    &mrhdl_new, &op);
1631 	if (status != DDI_SUCCESS) {
1632 		return (status);
1633 	}
1634 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1635 
1636 	/* Fill in the mr_desc structure */
1637 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1638 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1639 	/* Only set RKey if remote access was requested */
1640 	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1641 	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1642 	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1643 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
1644 	}
1645 
1646 	/*
1647 	 * If the region is mapped for streaming (i.e. noncoherent), then
1648 	 * report that a sync is required
1649 	 */
1650 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1651 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1652 
1653 	/* Return the Hermon MR handle */
1654 	*mr_new = (ibc_mr_hdl_t)mrhdl_new;
1655 
1656 	return (IBT_SUCCESS);
1657 }
1658 
1659 /*
1660  * hermon_ci_sync_mr()
1661  *    Synchronize access to a Memory Region
1662  *    Context: Can be called from interrupt or base context.
1663  */
1664 static ibt_status_t
1665 hermon_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
1666 {
1667 	hermon_state_t		*state;
1668 	int			status;
1669 
1670 	ASSERT(mr_segs != NULL);
1671 
1672 	/* Check for valid HCA handle */
1673 	if (hca == NULL) {
1674 		return (IBT_HCA_HDL_INVALID);
1675 	}
1676 
1677 	/* Grab the Hermon softstate pointer */
1678 	state = (hermon_state_t *)hca;
1679 
1680 	/* Sync the memory region */
1681 	status = hermon_mr_sync(state, mr_segs, num_segs);
1682 	return (status);
1683 }
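/*
 * Caller-side sketch (illustrative, not from this file): when registration
 * reported md_sync_required == B_TRUE, a consumer brackets its DMA with
 * sync calls, e.g. before reading data the HCA has written (ibt_mr_sync_t
 * layout per <sys/ib/ibtl/ibti_common.h>):
 *
 *	ibt_mr_sync_t	seg;
 *
 *	seg.ms_handle = mr_hdl;
 *	seg.ms_vaddr  = vaddr;
 *	seg.ms_len    = len;
 *	seg.ms_flags  = IBT_SYNC_READ;
 *	status = hermon_ci_sync_mr(hca, &seg, 1);
 */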
1684 
1685 
1686 /*
1687  * hermon_ci_alloc_mw()
1688  *    Allocate a Memory Window
1689  *    Context: Can be called from interrupt or base context.
1690  */
1691 static ibt_status_t
1692 hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
1693     ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
1694 {
1695 	hermon_state_t		*state;
1696 	hermon_pdhdl_t		pdhdl;
1697 	hermon_mwhdl_t		mwhdl;
1698 	int			status;
1699 
1700 	ASSERT(mw_p != NULL);
1701 	ASSERT(rkey_p != NULL);
1702 
1703 	/* Check for valid HCA handle */
1704 	if (hca == NULL) {
1705 		return (IBT_HCA_HDL_INVALID);
1706 	}
1707 
1708 	/* Check for valid PD handle pointer */
1709 	if (pd == NULL) {
1710 		return (IBT_PD_HDL_INVALID);
1711 	}
1712 
1713 	/* Grab the Hermon softstate pointer and PD handle */
1714 	state = (hermon_state_t *)hca;
1715 	pdhdl = (hermon_pdhdl_t)pd;
1716 
1717 	/* Allocate the memory window */
1718 	status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
1719 	if (status != DDI_SUCCESS) {
1720 		return (status);
1721 	}
1722 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
1723 
1724 	/* Return the MW handle and RKey */
1725 	*mw_p = (ibc_mw_hdl_t)mwhdl;
1726 	*rkey_p = mwhdl->mr_rkey;
1727 
1728 	return (IBT_SUCCESS);
1729 }
1730 
1731 
1732 /*
1733  * hermon_ci_free_mw()
1734  *    Free a Memory Window
1735  *    Context: Can be called from interrupt or base context.
1736  */
1737 static ibt_status_t
1738 hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
1739 {
1740 	hermon_state_t		*state;
1741 	hermon_mwhdl_t		mwhdl;
1742 	int			status;
1743 
1744 	/* Check for valid HCA handle */
1745 	if (hca == NULL) {
1746 		return (IBT_HCA_HDL_INVALID);
1747 	}
1748 
1749 	/* Check for valid MW handle */
1750 	if (mw == NULL) {
1751 		return (IBT_MW_HDL_INVALID);
1752 	}
1753 
1754 	/* Grab the Hermon softstate pointer and MW handle */
1755 	state = (hermon_state_t *)hca;
1756 	mwhdl = (hermon_mwhdl_t)mw;
1757 
1758 	/* Free the memory window */
1759 	status = hermon_mw_free(state, &mwhdl, HERMON_NOSLEEP);
1760 	return (status);
1761 }
1762 
1763 
1764 /*
1765  * hermon_ci_query_mw()
1766  *    Return the attributes of the specified Memory Window
1767  *    Context: Can be called from interrupt or base context.
1768  */
1769 static ibt_status_t
1770 hermon_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
1771     ibt_mw_query_attr_t *mw_attr_p)
1772 {
1773 	hermon_mwhdl_t		mwhdl;
1774 
1775 	ASSERT(mw_attr_p != NULL);
1776 
1777 	/* Check for valid HCA handle */
1778 	if (hca == NULL) {
1779 		return (IBT_HCA_HDL_INVALID);
1780 	}
1781 
1782 	/* Check for valid MemWin handle */
1783 	if (mw == NULL) {
1784 		return (IBT_MW_HDL_INVALID);
1785 	}
1786 
1787 	/* Query the memory window and fill in the return values */
1788 	mwhdl = (hermon_mwhdl_t)mw;
1789 	mutex_enter(&mwhdl->mr_lock);
1790 	mw_attr_p->mw_pd   = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
1791 	mw_attr_p->mw_rkey = mwhdl->mr_rkey;
1792 	mutex_exit(&mwhdl->mr_lock);
1793 
1794 	return (IBT_SUCCESS);
1795 }
1796 
1797 
1798 /*
1799  * hermon_ci_attach_mcg()
1800  *    Attach a Queue Pair to a Multicast Group
1801  *    Context: Can be called only from user or kernel context.
1802  */
1803 static ibt_status_t
1804 hermon_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1805     ib_lid_t lid)
1806 {
1807 	hermon_state_t		*state;
1808 	hermon_qphdl_t		qphdl;
1809 	int			status;
1810 
1811 	/* Check for valid HCA handle */
1812 	if (hca == NULL) {
1813 		return (IBT_HCA_HDL_INVALID);
1814 	}
1815 
1816 	/* Check for valid QP handle pointer */
1817 	if (qp == NULL) {
1818 		return (IBT_QP_HDL_INVALID);
1819 	}
1820 
1821 	/* Grab the Hermon softstate pointer and QP handles */
1822 	state = (hermon_state_t *)hca;
1823 	qphdl = (hermon_qphdl_t)qp;
1824 
1825 	/* Attach the QP to the multicast group */
1826 	status = hermon_mcg_attach(state, qphdl, gid, lid);
1827 	return (status);
1828 }
1829 
1830 
1831 /*
1832  * hermon_ci_detach_mcg()
1833  *    Detach a Queue Pair from a Multicast Group
1834  *    Context: Can be called only from user or kernel context.
1835  */
1836 static ibt_status_t
1837 hermon_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1838     ib_lid_t lid)
1839 {
1840 	hermon_state_t		*state;
1841 	hermon_qphdl_t		qphdl;
1842 	int			status;
1843 
1844 	/* Check for valid HCA handle */
1845 	if (hca == NULL) {
1846 		return (IBT_HCA_HDL_INVALID);
1847 	}
1848 
1849 	/* Check for valid QP handle pointer */
1850 	if (qp == NULL) {
1851 		return (IBT_QP_HDL_INVALID);
1852 	}
1853 
1854 	/* Grab the Hermon softstate pointer and QP handle */
1855 	state = (hermon_state_t *)hca;
1856 	qphdl = (hermon_qphdl_t)qp;
1857 
1858 	/* Detach the QP from the multicast group */
1859 	status = hermon_mcg_detach(state, qphdl, gid, lid);
1860 	return (status);
1861 }
1862 
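/*
 * Illustrative sketch (hypothetical client code, not part of this
 * driver): a UD client typically joins the multicast group through the
 * SA first and then attaches its channel, which arrives here via
 * hermon_ci_attach_mcg().  The names "port_gid", "mcg_attr" and "chan"
 * are assumptions for the example only.
 *
 *	ibt_mcg_info_t	mcg_info;
 *
 *	if (ibt_join_mcg(port_gid, &mcg_attr, &mcg_info, NULL, NULL) ==
 *	    IBT_SUCCESS)
 *		(void) ibt_attach_mcg(chan, &mcg_info);
 */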
1863 
1864 /*
1865  * hermon_ci_post_send()
1866  *    Post send work requests to the send queue on the specified QP
1867  *    Context: Can be called from interrupt or base context.
1868  */
1869 static ibt_status_t
1870 hermon_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
1871     uint_t num_wr, uint_t *num_posted_p)
1872 {
1873 	hermon_state_t		*state;
1874 	hermon_qphdl_t		qphdl;
1875 	int			status;
1876 
1877 	ASSERT(wr_p != NULL);
1878 	ASSERT(num_wr != 0);
1879 
1880 	/* Check for valid HCA handle */
1881 	if (hca == NULL) {
1882 		return (IBT_HCA_HDL_INVALID);
1883 	}
1884 
1885 	/* Check for valid QP handle pointer */
1886 	if (qp == NULL) {
1887 		return (IBT_QP_HDL_INVALID);
1888 	}
1889 
1890 	/* Grab the Hermon softstate pointer and QP handle */
1891 	state = (hermon_state_t *)hca;
1892 	qphdl = (hermon_qphdl_t)qp;
1893 
1894 	/* Post the send WQEs */
1895 	status = hermon_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
1896 	return (status);
1897 }
1898 
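/*
 * Illustrative sketch (hypothetical client code): a minimal send work
 * request as a client would build it before it funnels down into
 * hermon_ci_post_send().  "chan", "buf_va", "buf_len" and "lkey" are
 * assumptions for the example only.
 *
 *	ibt_send_wr_t	wr;
 *	ibt_wr_ds_t	sgl;
 *	uint_t		posted;
 *
 *	sgl.ds_va    = buf_va;
 *	sgl.ds_key   = lkey;
 *	sgl.ds_len   = buf_len;
 *	wr.wr_id     = (ibt_wrid_t)(uintptr_t)&wr;
 *	wr.wr_flags  = IBT_WR_SEND_SIGNAL;
 *	wr.wr_trans  = IBT_RC_SRV;
 *	wr.wr_opcode = IBT_WRC_SEND;
 *	wr.wr_nds    = 1;
 *	wr.wr_sgl    = &sgl;
 *	(void) ibt_post_send(chan, &wr, 1, &posted);
 */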
1899 
1900 /*
1901  * hermon_ci_post_recv()
1902  *    Post receive work requests to the receive queue on the specified QP
1903  *    Context: Can be called from interrupt or base context.
1904  */
1905 static ibt_status_t
1906 hermon_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
1907     uint_t num_wr, uint_t *num_posted_p)
1908 {
1909 	hermon_state_t		*state;
1910 	hermon_qphdl_t		qphdl;
1911 	int			status;
1912 
1913 	ASSERT(wr_p != NULL);
1914 	ASSERT(num_wr != 0);
1915 
1916 	state = (hermon_state_t *)hca;
1917 	qphdl = (hermon_qphdl_t)qp;
1918 
1919 	if (state == NULL) {
1920 		return (IBT_HCA_HDL_INVALID);
1921 	}
1922 
1923 	/* Check for valid QP handle pointer */
1924 	if (qphdl == NULL) {
1925 		return (IBT_QP_HDL_INVALID);
1926 	}
1927 
1928 	/* Post the receive WQEs */
1929 	status = hermon_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
1930 	return (status);
1931 }
1932 
1933 
1934 /*
1935  * hermon_ci_poll_cq()
1936  *    Poll for a work request completion
1937  *    Context: Can be called from interrupt or base context.
1938  */
1939 static ibt_status_t
1940 hermon_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
1941     uint_t num_wc, uint_t *num_polled)
1942 {
1943 	hermon_state_t		*state;
1944 	hermon_cqhdl_t		cqhdl;
1945 	int			status;
1946 
1947 	ASSERT(wc_p != NULL);
1948 
1949 	/* Check for valid HCA handle */
1950 	if (hca == NULL) {
1951 		return (IBT_HCA_HDL_INVALID);
1952 	}
1953 
1954 	/* Check for valid CQ handle pointer */
1955 	if (cq == NULL) {
1956 		return (IBT_CQ_HDL_INVALID);
1957 	}
1958 
1959 	/* Check for valid num_wc field */
1960 	if (num_wc == 0) {
1961 		return (IBT_INVALID_PARAM);
1962 	}
1963 
1964 	/* Grab the Hermon softstate pointer and CQ handle */
1965 	state = (hermon_state_t *)hca;
1966 	cqhdl = (hermon_cqhdl_t)cq;
1967 
1968 	/* Poll for work request completions */
1969 	status = hermon_cq_poll(state, cqhdl, wc_p, num_wc, num_polled);
1970 	return (status);
1971 }
1972 
1973 
1974 /*
1975  * hermon_ci_notify_cq()
1976  *    Enable notification events on the specified CQ
1977  *    Context: Can be called from interrupt or base context.
1978  */
1979 static ibt_status_t
1980 hermon_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
1981     ibt_cq_notify_flags_t flags)
1982 {
1983 	hermon_state_t		*state;
1984 	hermon_cqhdl_t		cqhdl;
1985 	int			status;
1986 
1987 	/* Check for valid HCA handle */
1988 	if (hca == NULL) {
1989 		return (IBT_HCA_HDL_INVALID);
1990 	}
1991 
1992 	/* Check for valid CQ handle pointer */
1993 	if (cq_hdl == NULL) {
1994 		return (IBT_CQ_HDL_INVALID);
1995 	}
1996 
1997 	/* Grab the Hermon softstate pointer and CQ handle */
1998 	state = (hermon_state_t *)hca;
1999 	cqhdl = (hermon_cqhdl_t)cq_hdl;
2000 
2001 	/* Enable the CQ notification */
2002 	status = hermon_cq_notify(state, cqhdl, flags);
2003 	return (status);
2004 }
2005 
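/*
 * Illustrative sketch (hypothetical client code): the usual pattern
 * built on the two entry points above is to drain the CQ, re-arm it,
 * and drain once more to close the race with completions that arrive
 * between the last empty poll and the re-arm.  "handle_completion" is
 * an assumed helper.
 *
 *	ibt_wc_t	wc;
 *	uint_t		polled;
 *
 *	while (ibt_poll_cq(cq_hdl, &wc, 1, &polled) == IBT_SUCCESS)
 *		handle_completion(&wc);
 *	(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
 *	while (ibt_poll_cq(cq_hdl, &wc, 1, &polled) == IBT_SUCCESS)
 *		handle_completion(&wc);
 */
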
2006 /*
2007  * hermon_ci_ci_data_in()
2008  *    Exchange CI-specific data.
2009  *    Context: Can be called only from user or kernel context.
2010  */
2011 static ibt_status_t
2012 hermon_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2013     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2014     size_t data_sz)
2015 {
2016 	hermon_state_t		*state;
2017 	int			status;
2018 
2019 	/* Check for valid HCA handle */
2020 	if (hca == NULL) {
2021 		return (IBT_HCA_HDL_INVALID);
2022 	}
2023 
2024 	/* Grab the Hermon softstate pointer */
2025 	state = (hermon_state_t *)hca;
2026 
2027 	/* Pass the data to the Hermon userland mapping interface */
2028 	status = hermon_umap_ci_data_in(state, flags, object,
2029 	    ibc_object_handle, data_p, data_sz);
2030 	return (status);
2031 }
2032 
2033 /*
2034  * hermon_ci_ci_data_out()
2035  *    Exchange CI-specific data.
2036  *    Context: Can be called only from user or kernel context.
2037  */
2038 static ibt_status_t
2039 hermon_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2040     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2041     size_t data_sz)
2042 {
2043 	hermon_state_t		*state;
2044 	int			status;
2045 
2046 	/* Check for valid HCA handle */
2047 	if (hca == NULL) {
2048 		return (IBT_HCA_HDL_INVALID);
2049 	}
2050 
2051 	/* Grab the Hermon softstate pointer */
2052 	state = (hermon_state_t *)hca;
2053 
2054 	/* Get the Hermon userland mapping information */
2055 	status = hermon_umap_ci_data_out(state, flags, object,
2056 	    ibc_object_handle, data_p, data_sz);
2057 	return (status);
2058 }
2059 
2060 
2061 /*
2062  * hermon_ci_alloc_srq()
2063  *    Allocate a Shared Receive Queue (SRQ)
2064  *    Context: Can be called only from user or kernel context
2065  */
2066 static ibt_status_t
2067 hermon_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
2068     ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
2069     ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
2070 {
2071 	hermon_state_t		*state;
2072 	hermon_pdhdl_t		pdhdl;
2073 	hermon_srqhdl_t		srqhdl;
2074 	hermon_srq_info_t	srqinfo;
2075 	int			status;
2076 
2077 	/* Check for valid HCA handle */
2078 	if (hca == NULL) {
2079 		return (IBT_HCA_HDL_INVALID);
2080 	}
2081 
2082 	state = (hermon_state_t *)hca;
2083 
2084 	/* Check for valid PD handle pointer */
2085 	if (pd == NULL) {
2086 		return (IBT_PD_HDL_INVALID);
2087 	}
2088 
2089 	pdhdl = (hermon_pdhdl_t)pd;
2090 
2091 	srqinfo.srqi_ibt_srqhdl = ibt_srq;
2092 	srqinfo.srqi_pd		= pdhdl;
2093 	srqinfo.srqi_sizes	= sizes;
2094 	srqinfo.srqi_real_sizes	= ret_sizes_p;
2095 	srqinfo.srqi_srqhdl	= &srqhdl;
2096 	srqinfo.srqi_flags	= flags;
2097 
2098 	status = hermon_srq_alloc(state, &srqinfo, HERMON_NOSLEEP);
2099 	if (status != DDI_SUCCESS) {
2100 		return (status);
2101 	}
2102 
2103 	*ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
2104 
2105 	return (IBT_SUCCESS);
2106 }
2107 
2108 /*
2109  * hermon_ci_free_srq()
2110  *    Free a Shared Receive Queue (SRQ)
2111  *    Context: Can be called only from user or kernel context
2112  */
2113 static ibt_status_t
2114 hermon_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
2115 {
2116 	hermon_state_t	*state;
2117 	hermon_srqhdl_t	srqhdl;
2118 	int		status;
2119 
2120 	/* Check for valid HCA handle */
2121 	if (hca == NULL) {
2122 		return (IBT_HCA_HDL_INVALID);
2123 	}
2124 
2125 	state = (hermon_state_t *)hca;
2126 
2127 	/* Check for valid SRQ handle pointer */
2128 	if (srq == NULL) {
2129 		return (IBT_SRQ_HDL_INVALID);
2130 	}
2131 
2132 	srqhdl = (hermon_srqhdl_t)srq;
2133 
2134 	/* Free the SRQ */
2135 	status = hermon_srq_free(state, &srqhdl, HERMON_NOSLEEP);
2136 	return (status);
2137 }
2138 
2139 /*
2140  * hermon_ci_query_srq()
2141  *    Query properties of a Shared Receive Queue (SRQ)
2142  *    Context: Can be called from interrupt or base context.
2143  */
2144 static ibt_status_t
2145 hermon_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
2146     ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
2147 {
2148 	hermon_srqhdl_t	srqhdl;
2149 
2150 	/* Check for valid HCA handle */
2151 	if (hca == NULL) {
2152 		return (IBT_HCA_HDL_INVALID);
2153 	}
2154 
2155 	/* Check for valid SRQ handle pointer */
2156 	if (srq == NULL) {
2157 		return (IBT_SRQ_HDL_INVALID);
2158 	}
2159 
2160 	srqhdl = (hermon_srqhdl_t)srq;
2161 
2162 	mutex_enter(&srqhdl->srq_lock);
2163 	if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
2164 		mutex_exit(&srqhdl->srq_lock);
2165 		return (IBT_SRQ_ERROR_STATE);
2166 	}
2167 
2168 	*pd_p   = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
2169 	sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz - 1;
2170 	sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
2171 	mutex_exit(&srqhdl->srq_lock);
2172 	*limit_p  = 0;
2173 
2174 	return (IBT_SUCCESS);
2175 }
2176 
2177 /*
2178  * hermon_ci_modify_srq()
2179  *    Modify properties of a Shared Receive Queue (SRQ)
2180  *    Context: Can be called from interrupt or base context.
2181  */
2182 /* ARGSUSED */
2183 static ibt_status_t
2184 hermon_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2185     ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
2186 {
2187 	hermon_state_t	*state;
2188 	hermon_srqhdl_t	srqhdl;
2189 	uint_t		resize_supported, cur_srq_size;
2190 	int		status;
2191 
2192 	/* Check for valid HCA handle */
2193 	if (hca == NULL) {
2194 		return (IBT_HCA_HDL_INVALID);
2195 	}
2196 
2197 	state = (hermon_state_t *)hca;
2198 
2199 	/* Check for valid SRQ handle pointer */
2200 	if (srq == NULL) {
2201 		return (IBT_SRQ_HDL_INVALID);
2202 	}
2203 
2204 	srqhdl = (hermon_srqhdl_t)srq;
2205 
2206 	/*
2207 	 * Check the error state of the SRQ.
2208 	 * Also, while we are holding the lock we save away the current SRQ
2209 	 * size for later use.
2210 	 */
2211 	mutex_enter(&srqhdl->srq_lock);
2212 	cur_srq_size = srqhdl->srq_wq_bufsz;
2213 	if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
2214 		mutex_exit(&srqhdl->srq_lock);
2215 		return (IBT_SRQ_ERROR_STATE);
2216 	}
2217 	mutex_exit(&srqhdl->srq_lock);
2218 
2219 	/*
2220 	 * Setting the limit watermark is not currently supported.  This is a
2221 	 * hermon hardware (firmware) limitation.  We return NOT_SUPPORTED here,
2222 	 * and have the limit code commented out for now.
2223 	 *
2224 	 * XXX If we enable the limit watermark support, we need to do checks
2225 	 * and set the 'srq->srq_wr_limit' here, instead of returning not
2226 	 * supported.  The 'hermon_srq_modify' operation below is for resizing
2227 	 * the SRQ only, the limit work should be done here.  If this is
2228 	 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
2229 	 * function should also be removed at that time.
2230 	 */
2231 	if (flags & IBT_SRQ_SET_LIMIT) {
2232 		return (IBT_NOT_SUPPORTED);
2233 	}
2234 
2235 	/*
2236 	 * Check the SET_SIZE flag.  If not set, we simply return success here.
2237 	 * However, if it is set, we check whether resize is supported and
2238 	 * only then continue with the resize processing.
2239 	 */
2240 	if (!(flags & IBT_SRQ_SET_SIZE)) {
2241 		return (IBT_SUCCESS);
2242 	}
2243 
2244 	resize_supported = state->hs_ibtfinfo.hca_attr->hca_flags &
2245 	    IBT_HCA_RESIZE_SRQ;
2246 
2247 	if (!resize_supported) {
2248 		return (IBT_NOT_SUPPORTED);
2249 	}
2250 
2251 	/*
2252 	 * We do not support resizing an SRQ to be smaller than its current
2253 	 * size.  If a smaller (or equal) size is requested, then we simply
2254 	 * return success, and do nothing.
2255 	 */
2256 	if (size <= cur_srq_size) {
2257 		*ret_size_p = cur_srq_size;
2258 		return (IBT_SUCCESS);
2259 	}
2260 
2261 	status = hermon_srq_modify(state, srqhdl, size, ret_size_p,
2262 	    HERMON_NOSLEEP);
2263 	if (status != DDI_SUCCESS) {
2264 		/* Set return value to current SRQ size */
2265 		*ret_size_p = cur_srq_size;
2266 		return (status);
2267 	}
2268 
2269 	return (IBT_SUCCESS);
2270 }
2271 
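/*
 * Illustrative sketch (hypothetical client code): since the limit
 * watermark is rejected above, the only effective modification is a
 * resize, which a client would gate on the HCA attributes (obtained
 * from ibt_query_hca()):
 *
 *	uint_t	real_size;
 *
 *	if (hca_attrs.hca_flags & IBT_HCA_RESIZE_SRQ)
 *		(void) ibt_modify_srq(srq_hdl, IBT_SRQ_SET_SIZE,
 *		    new_size, 0, &real_size);
 */
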
2272 /*
2273  * hermon_ci_post_srq()
2274  *    Post a Work Request to the specified Shared Receive Queue (SRQ)
2275  *    Context: Can be called from interrupt or base context.
2276  */
2277 static ibt_status_t
2278 hermon_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2279     ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2280 {
2281 	hermon_state_t	*state;
2282 	hermon_srqhdl_t	srqhdl;
2283 	int		status;
2284 
2285 	/* Check for valid HCA handle */
2286 	if (hca == NULL) {
2287 		return (IBT_HCA_HDL_INVALID);
2288 	}
2289 
2290 	state = (hermon_state_t *)hca;
2291 
2292 	/* Check for valid SRQ handle pointer */
2293 	if (srq == NULL) {
2294 		return (IBT_SRQ_HDL_INVALID);
2295 	}
2296 
2297 	srqhdl = (hermon_srqhdl_t)srq;
2298 
2299 	status = hermon_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2300 	return (status);
2301 }
2302 
2303 /* Address translation */
2304 
2305 struct ibc_ma_s {
2306 	int			h_ma_addr_list_len;
2307 	void			*h_ma_addr_list;
2308 	ddi_dma_handle_t	h_ma_dmahdl;	/* bind of the area itself */
2309 	ddi_dma_handle_t	h_ma_list_hdl;	/* bind of the PBL memory */
2310 	ddi_acc_handle_t	h_ma_list_acc_hdl; /* access hdl for PBL */
2311 	size_t			h_ma_real_len;	/* allocated PBL length */
2312 	caddr_t			h_ma_kaddr;	/* kernel VA of the PBL */
2313 	ibt_phys_addr_t		h_ma_list_cookie; /* DMA addr of the PBL */
2314 };
2315 
2316 static ibt_status_t
2317 hermon_map_mem_area_fmr(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2318     uint_t list_len, ibt_pmr_attr_t *pmr, ibc_ma_hdl_t *ma_hdl_p)
2319 {
2320 	int			status;
2321 	ibt_status_t		ibt_status;
2322 	ibc_ma_hdl_t		ma_hdl;
2323 	ib_memlen_t		len;
2324 	ddi_dma_attr_t		dma_attr;
2325 	uint_t			cookie_cnt;
2326 	ddi_dma_cookie_t	dmacookie;
2327 	hermon_state_t		*state;
2328 	uint64_t		*kaddr;
2329 	uint64_t		addr, endaddr, pagesize;
2330 	int			i, kmflag;
2331 	int			(*callback)(caddr_t);
2332 
2333 	if ((va_attrs->va_flags & IBT_VA_BUF) == 0) {
2334 		return (IBT_NOT_SUPPORTED);	/* XXX - not yet implemented */
2335 	}
2336 
2337 	state = (hermon_state_t *)hca;
2338 	hermon_dma_attr_init(state, &dma_attr);
2339 	if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2340 		kmflag = KM_NOSLEEP;
2341 		callback = DDI_DMA_DONTWAIT;
2342 	} else {
2343 		kmflag = KM_SLEEP;
2344 		callback = DDI_DMA_SLEEP;
2345 	}
2346 
2347 	ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2348 	if (ma_hdl == NULL) {
2349 		return (IBT_INSUFF_RESOURCE);
2350 	}
2351 #ifdef	__sparc
2352 	if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2353 		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2354 
2355 	if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2356 		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2357 #endif
2358 
2359 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2360 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2361 	    callback, NULL, &ma_hdl->h_ma_dmahdl);
2362 	if (status != DDI_SUCCESS) {
2363 		kmem_free(ma_hdl, sizeof (*ma_hdl));
2364 		return (IBT_INSUFF_RESOURCE);
2365 	}
2366 	status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2367 	    va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2368 	    callback, NULL, &dmacookie, &cookie_cnt);
2369 	if (status != DDI_DMA_MAPPED) {
2370 		ibt_status = ibc_get_ci_failure(0);
2371 		goto marea_fail3;
2372 	}
2373 
2374 	ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
2375 	ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
2376 	if (ma_hdl->h_ma_kaddr == NULL) {
2377 		ibt_status = IBT_INSUFF_RESOURCE;
2378 		goto marea_fail4;
2379 	}
2380 
2381 	i = 0;
2382 	len = 0;
2383 	pagesize = PAGESIZE;
2384 	kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2385 	while (cookie_cnt-- > 0) {
2386 		addr	= dmacookie.dmac_laddress;
2387 		len	+= dmacookie.dmac_size;
2388 		endaddr	= addr + (dmacookie.dmac_size - 1);
2389 		addr	= addr & ~(pagesize - 1);
2390 		while (addr <= endaddr) {
2391 			if (i >= list_len) {
2392 				ibt_status = IBT_PBL_TOO_SMALL;
2393 				goto marea_fail5;
2394 			}
2395 			kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2396 			i++;
2397 			addr += pagesize;
2398 			if (addr == 0) {
2399 				static int do_once = 1;
2400 				_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2401 				    do_once))
2402 				if (do_once) {
2403 					do_once = 0;
2404 					cmn_err(CE_NOTE, "probable error in "
2405 					    "dma_cookie address: map_mem_area");
2406 				}
2407 				break;
2408 			}
2409 		}
2410 		if (cookie_cnt != 0)
2411 			ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2412 	}
2413 
2414 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2415 	pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
2416 	pmr->pmr_iova = va_attrs->va_vaddr;
2417 	pmr->pmr_len = len;
2418 	pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2419 	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
2420 	pmr->pmr_num_buf = i;
2421 	pmr->pmr_ma = ma_hdl;
2422 
2423 	*ma_hdl_p = ma_hdl;
2424 	return (IBT_SUCCESS);
2425 
2426 marea_fail5:
2427 	kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2428 marea_fail4:
2429 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2430 marea_fail3:
2431 	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2432 	kmem_free(ma_hdl, sizeof (*ma_hdl));
2433 	*ma_hdl_p = NULL;
2434 	return (ibt_status);
2435 }
2436 
2437 /*
2438  * hermon_ci_map_mem_area()
2439  *    Context: Can be called from interrupt or base context.
2440  *
2441  *	Creates the memory mapping suitable for a subsequent posting of an
2442  *	FRWR work request.  All the info about the memory area for the
2443  *	FRWR work request (wr member of "union ibt_reg_req_u") is filled
2444  *	such that the client only needs to point wr.rc.rcwr.reg_pmr to it,
2445  *	and then fill in the additional information only it knows.
2446  *
2447  *	Alternatively, creates the memory mapping for FMR.
2448  */
2449 /* ARGSUSED */
2450 static ibt_status_t
2451 hermon_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2452     void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
2453     ibc_ma_hdl_t *ma_hdl_p)
2454 {
2455 	ibt_status_t		ibt_status;
2456 	int			status;
2457 	ibc_ma_hdl_t		ma_hdl;
2458 	ibt_wr_reg_pmr_t	*pmr;
2459 	ib_memlen_t		len;
2460 	ddi_dma_attr_t		dma_attr;
2461 	ddi_dma_handle_t	khdl;
2462 	uint_t			cookie_cnt;
2463 	ddi_dma_cookie_t	dmacookie, kcookie;
2464 	hermon_state_t		*state;
2465 	uint64_t		*kaddr;
2466 	uint64_t		addr, endaddr, pagesize, kcookie_paddr;
2467 	int			i, j, kmflag;
2468 	int			(*callback)(caddr_t);
2469 
2470 	if (va_attrs->va_flags & (IBT_VA_FMR | IBT_VA_REG_FN)) {
2471 		/* delegate FMR and Physical Register to other function */
2472 		return (hermon_map_mem_area_fmr(hca, va_attrs, list_len,
2473 		    &reg_req->fn_arg, ma_hdl_p));
2474 	}
2475 
2476 	/* FRWR */
2477 
2478 	state = (hermon_state_t *)hca;
2479 	hermon_dma_attr_init(state, &dma_attr);
2480 #ifdef	__sparc
2481 	if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2482 		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2483 
2484 	if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2485 		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2486 #endif
2487 	if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2488 		kmflag = KM_NOSLEEP;
2489 		callback = DDI_DMA_DONTWAIT;
2490 	} else {
2491 		kmflag = KM_SLEEP;
2492 		callback = DDI_DMA_SLEEP;
2493 	}
2494 
2495 	ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2496 	if (ma_hdl == NULL) {
2497 		return (IBT_INSUFF_RESOURCE);
2498 	}
2499 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2500 
2501 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2502 	    callback, NULL, &ma_hdl->h_ma_dmahdl);
2503 	if (status != DDI_SUCCESS) {
2504 		ibt_status = IBT_INSUFF_RESOURCE;
2505 		goto marea_fail0;
2506 	}
2507 	dma_attr.dma_attr_align = 64;	/* as per PRM */
2508 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2509 	    callback, NULL, &ma_hdl->h_ma_list_hdl);
2510 	if (status != DDI_SUCCESS) {
2511 		ibt_status = IBT_INSUFF_RESOURCE;
2512 		goto marea_fail1;
2513 	}
2514 	/*
2515 	 * The last slot on each page of the list cannot hold a real entry
2516 	 * (it is used to link to the next page), so 1 extra ibt_phys_addr_t
2517 	 * is allocated per page.  We add 1 more to cover an allocation that
2518 	 * is smaller than a page but still crosses a page boundary.
2519 	 */
2520 	status = ddi_dma_mem_alloc(ma_hdl->h_ma_list_hdl, (list_len + 1 +
2521 	    list_len / (HERMON_PAGESIZE / sizeof (ibt_phys_addr_t))) *
2522 	    sizeof (ibt_phys_addr_t),
2523 	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, callback, NULL,
2524 	    &ma_hdl->h_ma_kaddr, &ma_hdl->h_ma_real_len,
2525 	    &ma_hdl->h_ma_list_acc_hdl);
2526 	if (status != DDI_SUCCESS) {
2527 		ibt_status = IBT_INSUFF_RESOURCE;
2528 		goto marea_fail2;
2529 	}
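	/*
	 * Worked example of the sizing above: assuming a HERMON_PAGESIZE
	 * of 4096 (512 eight-byte ibt_phys_addr_t entries per page) and
	 * list_len = 1024, we allocate (1024 + 1 + 1024/512) * 8 = 8216
	 * bytes: two link slots plus one spare entry beyond the list
	 * itself.
	 */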
2530 	status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_list_hdl, NULL,
2531 	    ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len, DDI_DMA_RDWR |
2532 	    DDI_DMA_CONSISTENT, callback, NULL,
2533 	    &kcookie, &cookie_cnt);
2534 	if (status != DDI_SUCCESS) {
2535 		ibt_status = IBT_INSUFF_RESOURCE;
2536 		goto marea_fail3;
2537 	}
2538 	if ((kcookie.dmac_laddress & 0x3f) != 0) {
2539 		cmn_err(CE_NOTE, "64-byte alignment assumption wrong");
2540 		ibt_status = ibc_get_ci_failure(0);
2541 		goto marea_fail4;
2542 	}
2543 	ma_hdl->h_ma_list_cookie.p_laddr = kcookie.dmac_laddress;
2544 
2545 	if (va_attrs->va_flags & IBT_VA_BUF) {
2546 		status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2547 		    va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2548 		    callback, NULL, &dmacookie, &cookie_cnt);
2549 	} else {
2550 		status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_dmahdl,
2551 		    va_attrs->va_as, (caddr_t)(uintptr_t)va_attrs->va_vaddr,
2552 		    va_attrs->va_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2553 		    callback, NULL, &dmacookie, &cookie_cnt);
2554 	}
2555 	if (status != DDI_DMA_MAPPED) {
2556 		ibt_status = ibc_get_ci_failure(0);
2557 		goto marea_fail4;
2558 	}
2559 	i = 0;	/* count the number of pbl entries */
2560 	j = 0;	/* count the number of links to next HERMON_PAGE */
2561 	len = 0;
2562 	pagesize = PAGESIZE;
2563 	kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2564 	kcookie_paddr = kcookie.dmac_laddress + HERMON_PAGEMASK;
2565 	khdl = ma_hdl->h_ma_list_hdl;
2566 	while (cookie_cnt-- > 0) {
2567 		addr	= dmacookie.dmac_laddress;
2568 		len	+= dmacookie.dmac_size;
2569 		endaddr	= addr + (dmacookie.dmac_size - 1);
2570 		addr	= addr & ~(pagesize - 1);
2571 		while (addr <= endaddr) {
2572 			if (i >= list_len) {
2573 				ibt_status = IBT_PBL_TOO_SMALL;
2574 				goto marea_fail5;
2575 			}
2576 			/* Deal with last entry on page. */
2577 			/* Last slot on each list page links to the next. */
2578 				if (kcookie.dmac_size > HERMON_PAGESIZE) {
2579 					kcookie_paddr += HERMON_PAGESIZE;
2580 					kcookie.dmac_size -= HERMON_PAGESIZE;
2581 				} else {
2582 					ddi_dma_nextcookie(khdl, &kcookie);
2583 					kcookie_paddr = kcookie.dmac_laddress;
2584 				}
2585 				kaddr[i+j] = htonll(kcookie_paddr);
2586 				j++;
2587 			}
2588 			kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2589 			i++;
2590 			addr += pagesize;
2591 			if (addr == 0) {
2592 				static int do_once = 1;
2593 				_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2594 				    do_once))
2595 				if (do_once) {
2596 					do_once = 0;
2597 					cmn_err(CE_NOTE, "probable error in "
2598 					    "dma_cookie address: map_mem_area");
2599 				}
2600 				break;
2601 			}
2602 		}
2603 		if (cookie_cnt != 0)
2604 			ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2605 	}
2606 
2607 	pmr = &reg_req->wr;
2608 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2609 	pmr->pmr_len = len;
2610 	pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2611 	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
2612 	pmr->pmr_num_buf = i;
2613 	pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;
2614 
2615 	*ma_hdl_p = ma_hdl;
2616 	return (IBT_SUCCESS);
2617 
2618 marea_fail5:
2619 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2620 	if (status != DDI_SUCCESS)
2621 		HERMON_WARNING(state, "failed to unbind DMA mapping");
2622 marea_fail4:
2623 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2624 	if (status != DDI_SUCCESS)
2625 		HERMON_WARNING(state, "failed to unbind DMA mapping");
2626 marea_fail3:
2627 	ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2628 marea_fail2:
2629 	ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2630 marea_fail1:
2631 	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2632 marea_fail0:
2633 	kmem_free(ma_hdl, sizeof (*ma_hdl));
2634 	*ma_hdl_p = NULL;
2635 	return (ibt_status);
2636 }
2637 
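/*
 * Illustrative sketch (hypothetical client code): the FRWR flow the
 * comment above describes.  The client maps the area, points the
 * fast-register work request at the returned description, and fills
 * in only what the CI cannot know (keys and access flags):
 *
 *	ibt_reg_req_t	reg_req;
 *	ibc_ma_hdl_t	ma_hdl;
 *	ibt_send_wr_t	wr;
 *
 *	if (ibt_map_mem_area(hca_hdl, &va_attrs, list_len,
 *	    &reg_req, &ma_hdl) == IBT_SUCCESS) {
 *		wr.wr.rc.rcwr.reg_pmr = &reg_req.wr;
 *		... fill in keys and access flags, then post ...
 *	}
 */
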
2638 /*
2639  * hermon_ci_unmap_mem_area()
2640  * Unmap the memory area
2641  *    Context: Can be called from interrupt or base context.
2642  */
2643 /* ARGSUSED */
2644 static ibt_status_t
2645 hermon_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
2646 {
2647 	int			status;
2648 	hermon_state_t		*state;
2649 
2650 	if (ma_hdl == NULL) {
2651 		return (IBT_MA_HDL_INVALID);
2652 	}
2653 	state = (hermon_state_t *)hca;
2654 	if (ma_hdl->h_ma_list_hdl != NULL) {
2655 		status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2656 		if (status != DDI_SUCCESS)
2657 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2658 		ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2659 		ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2660 	} else {
2661 		kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2662 	}
2663 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2664 	if (status != DDI_SUCCESS)
2665 		HERMON_WARNING(state, "failed to unbind DMA mapping");
2666 	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2667 	kmem_free(ma_hdl, sizeof (*ma_hdl));
2668 	return (IBT_SUCCESS);
2669 }
2670 
2671 struct ibc_mi_s {
2672 	int			imh_len;	/* number of DMA handles */
2673 	ddi_dma_handle_t	imh_dmahandle[1]; /* variable-length array */
2674 };
2675 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2676     ibc_mi_s::imh_len
2677     ibc_mi_s::imh_dmahandle))
2678 
2679 
2680 /*
2681  * hermon_ci_map_mem_iov()
2682  * Map the memory
2683  *    Context: Can be called from interrupt or base context.
2684  */
2685 /* ARGSUSED */
2686 static ibt_status_t
2687 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2688     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2689 {
2690 	int			status;
2691 	int			i, j, nds, max_nds;
2692 	uint_t			len;
2693 	ibt_status_t		ibt_status;
2694 	ddi_dma_handle_t	dmahdl;
2695 	ddi_dma_cookie_t	dmacookie;
2696 	ddi_dma_attr_t		dma_attr;
2697 	uint_t			cookie_cnt;
2698 	ibc_mi_hdl_t		mi_hdl;
2699 	ibt_lkey_t		rsvd_lkey;
2700 	ibt_wr_ds_t		*sgl;
2701 	hermon_state_t		*state;
2702 	int			kmflag;
2703 	int			(*callback)(caddr_t);
2704 
2705 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
2706 
2707 	if (mi_hdl_p == NULL)
2708 		return (IBT_MI_HDL_INVALID);
2709 
2710 	/* Check for valid HCA handle */
2711 	if (hca == NULL)
2712 		return (IBT_HCA_HDL_INVALID);
2713 
2714 	state = (hermon_state_t *)hca;
2715 	hermon_dma_attr_init(state, &dma_attr);
2716 #ifdef	__sparc
2717 	if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2718 		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2719 
2720 	if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2721 		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2722 #endif
2723 
2724 	nds = 0;
2725 	max_nds = iov_attr->iov_wr_nds;
2726 	if (iov_attr->iov_lso_hdr_sz)
2727 		max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2728 		    0xf) >> 4;	/* 0xf is for rounding up to a multiple of 16 */
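	/*
	 * Worked example of the adjustment above: for a hypothetical
	 * iov_lso_hdr_sz of 54 bytes, (54 + 4 + 0xf) >> 4 == 4, so four
	 * 16-byte WQE slots are reserved for the inline LSO header and
	 * max_nds shrinks by that amount.
	 */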
2729 	rsvd_lkey = state->hs_devlim.rsv_lkey;
2730 	if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2731 		kmflag = KM_SLEEP;
2732 		callback = DDI_DMA_SLEEP;
2733 	} else {
2734 		kmflag = KM_NOSLEEP;
2735 		callback = DDI_DMA_DONTWAIT;
2736 	}
2737 
2738 	if (iov_attr->iov_flags & IBT_IOV_BUF) {
2739 		mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2740 		if (mi_hdl == NULL)
2741 			return (IBT_INSUFF_RESOURCE);
2742 		sgl = wr->send.wr_sgl;
2743 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2744 
2745 		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2746 		    callback, NULL, &dmahdl);
2747 		if (status != DDI_SUCCESS) {
2748 			kmem_free(mi_hdl, sizeof (*mi_hdl));
2749 			return (IBT_INSUFF_RESOURCE);
2750 		}
2751 		status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2752 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2753 		    &dmacookie, &cookie_cnt);
2754 		if (status != DDI_DMA_MAPPED) {
2755 			ddi_dma_free_handle(&dmahdl);
2756 			kmem_free(mi_hdl, sizeof (*mi_hdl));
2757 			return (ibc_get_ci_failure(0));
2758 		}
2759 		while (cookie_cnt-- > 0) {
2760 			if (nds >= max_nds) {
2761 				status = ddi_dma_unbind_handle(dmahdl);
2762 				if (status != DDI_SUCCESS)
2763 					HERMON_WARNING(state, "failed to "
2764 					    "unbind DMA mapping");
2765 				ddi_dma_free_handle(&dmahdl);
2766 				return (IBT_SGL_TOO_SMALL);
2767 			}
2768 			sgl[nds].ds_va = dmacookie.dmac_laddress;
2769 			sgl[nds].ds_key = rsvd_lkey;
2770 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2771 			nds++;
2772 			if (cookie_cnt != 0)
2773 				ddi_dma_nextcookie(dmahdl, &dmacookie);
2774 		}
2775 		wr->send.wr_nds = nds;
2776 		mi_hdl->imh_len = 1;
2777 		mi_hdl->imh_dmahandle[0] = dmahdl;
2778 		*mi_hdl_p = mi_hdl;
2779 		return (IBT_SUCCESS);
2780 	}
2781 
2782 	if (iov_attr->iov_flags & IBT_IOV_RECV)
2783 		sgl = wr->recv.wr_sgl;
2784 	else
2785 		sgl = wr->send.wr_sgl;
2786 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2787 
2788 	len = iov_attr->iov_list_len;
2789 	for (i = 0, j = 0; j < len; j++) {
2790 		if (iov_attr->iov[j].iov_len == 0)
2791 			continue;
2792 		i++;
2793 	}
2794 	mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2795 	    (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2796 	if (mi_hdl == NULL)
2797 		return (IBT_INSUFF_RESOURCE);
2798 	mi_hdl->imh_len = i;
2799 	for (i = 0, j = 0; j < len; j++) {
2800 		if (iov_attr->iov[j].iov_len == 0)
2801 			continue;
2802 		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2803 		    callback, NULL, &dmahdl);
2804 		if (status != DDI_SUCCESS) {
2805 			ibt_status = IBT_INSUFF_RESOURCE;
2806 			goto fail2;
2807 		}
2808 		status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
2809 		    iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
2810 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2811 		    &dmacookie, &cookie_cnt);
2812 		if (status != DDI_DMA_MAPPED) {
2813 			ibt_status = ibc_get_ci_failure(0);
2814 			goto fail1;
2815 		}
2816 		if (nds + cookie_cnt > max_nds) {
2817 			ibt_status = IBT_SGL_TOO_SMALL;
			/* unbind the current, not-yet-saved handle */
			(void) ddi_dma_unbind_handle(dmahdl);
2818 			goto fail1;
2819 		}
2820 		while (cookie_cnt-- > 0) {
2821 			sgl[nds].ds_va = dmacookie.dmac_laddress;
2822 			sgl[nds].ds_key = rsvd_lkey;
2823 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2824 			nds++;
2825 			if (cookie_cnt != 0)
2826 				ddi_dma_nextcookie(dmahdl, &dmacookie);
2827 		}
2828 		mi_hdl->imh_dmahandle[i] = dmahdl;
2829 		i++;
2830 	}
2831 
2832 	if (iov_attr->iov_flags & IBT_IOV_RECV)
2833 		wr->recv.wr_nds = nds;
2834 	else
2835 		wr->send.wr_nds = nds;
2836 	*mi_hdl_p = mi_hdl;
2837 	return (IBT_SUCCESS);
2838 
2839 fail1:
2840 	ddi_dma_free_handle(&dmahdl);
2841 fail2:
2842 	while (--i >= 0) {
2843 		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2844 		if (status != DDI_SUCCESS)
2845 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2846 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2847 	}
2848 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
2849 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2850 	*mi_hdl_p = NULL;
2851 	return (ibt_status);
2852 }
2853 
2854 /*
2855  * hermon_ci_unmap_mem_iov()
2856  * Unmap the memory
2857  *    Context: Can be called from interrupt or base context.
2858  */
2859 static ibt_status_t
2860 hermon_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
2861 {
2862 	int		status, i;
2863 	hermon_state_t	*state;
2864 
2865 	state = (hermon_state_t *)hca;
2866 
	/* Check for valid "mapped iov" handle */
	if (mi_hdl == NULL)
		return (IBT_MI_HDL_INVALID);

2867 	for (i = mi_hdl->imh_len; --i >= 0; ) {
2868 		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2869 		if (status != DDI_SUCCESS)
2870 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2871 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2872 	}
2873 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
2874 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2875 	return (IBT_SUCCESS);
2876 }
2877 
2878 /* Allocate L_Key */
2879 /*
2880  * hermon_ci_alloc_lkey()
2881  */
2882 /* ARGSUSED */
2883 static ibt_status_t
2884 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2885     ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
2886     ibt_pmr_desc_t *mem_desc_p)
2887 {
2888 	return (IBT_NOT_SUPPORTED);
2889 }
2890 
2891 /* Physical Register Memory Region */
2892 /*
2893  * hermon_ci_register_physical_mr()
2894  */
2895 /* ARGSUSED */
2896 static ibt_status_t
2897 hermon_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2898     ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2899     ibt_pmr_desc_t *mem_desc_p)
2900 {
2901 	return (IBT_NOT_SUPPORTED);
2902 }
2903 
2904 /*
2905  * hermon_ci_reregister_physical_mr()
2906  */
2907 /* ARGSUSED */
2908 static ibt_status_t
2909 hermon_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
2910     ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
2911     ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
2912 {
2913 	return (IBT_NOT_SUPPORTED);
2914 }
2915 
2916 /* Mellanox FMR Support */
2917 /*
2918  * hermon_ci_create_fmr_pool()
2919  * Creates a pool of memory regions suitable for FMR registration
2920  *    Context: Can be called from base context only
2921  */
2922 static ibt_status_t
2923 hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2924     ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
2925 {
2926 	hermon_state_t	*state;
2927 	hermon_pdhdl_t	pdhdl;
2928 	hermon_fmrhdl_t	fmrpoolhdl;
2929 	int		status;
2930 
2931 	/* Check for valid HCA handle */
2932 	if (hca == NULL) {
2933 		return (IBT_HCA_HDL_INVALID);
2934 	}
2935 
2936 	state = (hermon_state_t *)hca;
2937 
2938 	/* Check for valid PD handle pointer */
2939 	if (pd == NULL) {
2940 		return (IBT_PD_HDL_INVALID);
2941 	}
2942 
2943 	pdhdl = (hermon_pdhdl_t)pd;
2944 
2945 	/*
2946 	 * Validate the access flags.  Both Remote Write and Remote Atomic
2947 	 * require the Local Write flag to be set
2948 	 */
2949 	if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2950 	    (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2951 	    !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2952 		return (IBT_MR_ACCESS_REQ_INVALID);
2953 	}
2954 
2955 	status = hermon_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl);
2956 	if (status != DDI_SUCCESS) {
2957 		return (status);
2958 	}
2959 
2960 	/* Set fmr_pool from hermon handle */
2961 	*fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl;
2962 
2963 	return (IBT_SUCCESS);
2964 }
2965 
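/*
 * Illustrative sketch (hypothetical client code): creating and later
 * destroying an FMR pool through the IBTF wrappers.  The flag rule
 * enforced above means remote access must be paired with local write:
 *
 *	ibt_fmr_pool_attr_t	params;
 *	ibt_fmr_pool_hdl_t	pool;
 *
 *	bzero(&params, sizeof (params));
 *	params.fmr_flags = IBT_MR_ENABLE_LOCAL_WRITE |
 *	    IBT_MR_ENABLE_REMOTE_WRITE;
 *	... fill in the pool sizing parameters ...
 *	if (ibt_create_fmr_pool(hca_hdl, pd_hdl, &params, &pool) ==
 *	    IBT_SUCCESS)
 *		(void) ibt_destroy_fmr_pool(hca_hdl, pool);
 */
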
2966 /*
2967  * hermon_ci_destroy_fmr_pool()
2968  * Free all resources associated with an FMR pool.
2969  *    Context: Can be called from base context only.
2970  */
2971 static ibt_status_t
2972 hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2973 {
2974 	hermon_state_t	*state;
2975 	hermon_fmrhdl_t	fmrpoolhdl;
2976 	int		status;
2977 
2978 	/* Check for valid HCA handle */
2979 	if (hca == NULL) {
2980 		return (IBT_HCA_HDL_INVALID);
2981 	}
2982 
2983 	state = (hermon_state_t *)hca;
2984 
2985 	/* Check for valid FMR Pool handle */
2986 	if (fmr_pool == NULL) {
2987 		return (IBT_FMR_POOL_HDL_INVALID);
2988 	}
2989 
2990 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2991 
2992 	status = hermon_destroy_fmr_pool(state, fmrpoolhdl);
2993 	return (status);
2994 }
2995 
2996 /*
2997  * hermon_ci_flush_fmr_pool()
2998  * Force a flush of the memory tables, cleaning up used FMR resources.
2999  *    Context: Can be called from interrupt or base context.
3000  */
3001 static ibt_status_t
3002 hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
3003 {
3004 	hermon_state_t	*state;
3005 	hermon_fmrhdl_t	fmrpoolhdl;
3006 	int		status;
3007 
3008 	/* Check for valid HCA handle */
3009 	if (hca == NULL) {
3010 		return (IBT_HCA_HDL_INVALID);
3011 	}
3012 
3013 	state = (hermon_state_t *)hca;
3014 
3015 	/* Check for valid FMR Pool handle */
3016 	if (fmr_pool == NULL) {
3017 		return (IBT_FMR_POOL_HDL_INVALID);
3018 	}
3019 
3020 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
3021 
3022 	status = hermon_flush_fmr_pool(state, fmrpoolhdl);
3023 	return (status);
3024 }
3025 
3026 /*
3027  * hermon_ci_register_physical_fmr()
3028  * From the 'pool' of FMR regions passed in, performs register physical
3029  * operation.
3030  *    Context: Can be called from interrupt or base context.
3031  */
3032 /* ARGSUSED */
3033 static ibt_status_t
3034 hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
3035     ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
3036     void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
3037 {
3038 	hermon_state_t		*state;
3039 	hermon_mrhdl_t		mrhdl;
3040 	hermon_fmrhdl_t		fmrpoolhdl;
3041 	int			status;
3042 
3043 	ASSERT(mem_pattr != NULL);
3044 	ASSERT(mr_p != NULL);
3045 	ASSERT(mem_desc_p != NULL);
3046 
3047 	/* Check for valid HCA handle */
3048 	if (hca == NULL) {
3049 		return (IBT_HCA_HDL_INVALID);
3050 	}
3051 
3052 	/* Grab the Hermon softstate pointer */
3053 	state = (hermon_state_t *)hca;
3054 
3055 	/* Check for valid FMR Pool handle */
3056 	if (fmr_pool == NULL) {
3057 		return (IBT_FMR_POOL_HDL_INVALID);
3058 	}
3059 
3060 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
3061 
3062 	status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
3063 	    &mrhdl, mem_desc_p);
3064 	if (status != DDI_SUCCESS) {
3065 		return (status);
3066 	}
3067 
3068 	/*
3069 	 * If the region is mapped for streaming (i.e. noncoherent), then
3070 	 * mark that a sync will be required
3071 	 */
3072 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
3073 	mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
3074 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
3075 	if (mem_desc_p->pmd_sync_required == B_TRUE) {
3076 		/* Fill in DMA handle for future sync operations */
3077 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo))
3078 		mrhdl->mr_bindinfo.bi_dmahdl =
3079 		    (ddi_dma_handle_t)mem_pattr->pmr_ma;
3080 	}
3081 
3082 	/* Return the Hermon MR handle */
3083 	*mr_p = (ibc_mr_hdl_t)mrhdl;
3084 
3085 	return (IBT_SUCCESS);
3086 }
3087 
3088 /*
3089  * hermon_ci_deregister_fmr()
3090  * Moves an FMR (specified by 'mr') to the deregistered state.
3091  *    Context: Can be called from base context only.
3092  */
3093 static ibt_status_t
3094 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
3095 {
3096 	hermon_state_t		*state;
3097 	hermon_mrhdl_t		mrhdl;
3098 	int			status;
3099 
3100 	/* Check for valid HCA handle */
3101 	if (hca == NULL) {
3102 		return (IBT_HCA_HDL_INVALID);
3103 	}
3104 
3105 	/* Check for valid memory region handle */
3106 	if (mr == NULL) {
3107 		return (IBT_MR_HDL_INVALID);
3108 	}
3109 
3110 	/* Grab the Hermon softstate pointer */
3111 	state = (hermon_state_t *)hca;
3112 	mrhdl = (hermon_mrhdl_t)mr;
3113 
3114 	/*
3115 	 * Deregister the memory region, either "unmapping" the FMR or
3116 	 * deregistering the normal memory region.
3117 	 */
3118 	status = hermon_deregister_fmr(state, mrhdl);
3119 	return (status);
3120 }
3121 
3122 static int
3123 hermon_mem_alloc(hermon_state_t *state, size_t size, ibt_mr_flags_t flags,
3124     caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_hdl)
3125 {
3126 	ddi_dma_handle_t	dma_hdl;
3127 	ddi_dma_attr_t		dma_attr;
3128 	ddi_acc_handle_t	acc_hdl;
3129 	size_t			real_len;
3130 	int			status;
3131 	int			(*ddi_cb)(caddr_t);
3132 	ibc_mem_alloc_hdl_t	mem_alloc_hdl;
3133 
3134 	hermon_dma_attr_init(state, &dma_attr);
3135 
3136 	ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3137 
3138 	/* Allocate a DMA handle */
3139 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, ddi_cb,
3140 	    NULL, &dma_hdl);
3141 	if (status != DDI_SUCCESS) {
3142 		return (DDI_FAILURE);
3143 	}
3144 
3145 	/* Allocate DMA memory */
3146 	status = ddi_dma_mem_alloc(dma_hdl, size,
3147 	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
3148 	    NULL, kaddrp, &real_len, &acc_hdl);
3149 	if (status != DDI_SUCCESS) {
3150 		ddi_dma_free_handle(&dma_hdl);
3151 		return (DDI_FAILURE);
3152 	}
3153 
3154 	/* Package the hermon_dma_info contents and return */
3155 	mem_alloc_hdl = kmem_alloc(sizeof (**mem_hdl),
3156 	    (flags & IBT_MR_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
3157 	if (mem_alloc_hdl == NULL) {
3158 		ddi_dma_mem_free(&acc_hdl);
3159 		ddi_dma_free_handle(&dma_hdl);
3160 		return (DDI_FAILURE);
3161 	}
3162 	mem_alloc_hdl->ibc_dma_hdl = dma_hdl;
3163 	mem_alloc_hdl->ibc_acc_hdl = acc_hdl;
3164 
3165 	*mem_hdl = mem_alloc_hdl;
3166 
3167 	return (DDI_SUCCESS);
3168 }
3169 
3170 /*
3171  * hermon_ci_alloc_io_mem()
3172  *	Allocate DMA-able memory
3174  */
3175 static ibt_status_t
3176 hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size, ibt_mr_flags_t mr_flag,
3177     caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_alloc_hdl_p)
3178 {
3179 	hermon_state_t	*state;
3180 	int		status;
3181 
3182 	/* Check for valid HCA handle */
3183 	if (hca == NULL) {
3184 		return (IBT_HCA_HDL_INVALID);
3185 	}
3186 
3187 	/* Check for valid mem_alloc_hdl_p handle pointer */
3188 	if (mem_alloc_hdl_p == NULL) {
3189 		return (IBT_MEM_ALLOC_HDL_INVALID);
3190 	}
3191 
3192 	/* Grab the Hermon softstate pointer and mem handle */
3193 	state = (hermon_state_t *)hca;
3194 
3195 	/* Allocate the memory and handles */
3196 	status = hermon_mem_alloc(state, size, mr_flag, kaddrp,
3197 	    mem_alloc_hdl_p);
3198 
3199 	if (status != DDI_SUCCESS) {
3200 		*mem_alloc_hdl_p = NULL;
3201 		*kaddrp = NULL;
3202 		return (status);
3203 	}
3204 
3205 	return (IBT_SUCCESS);
3206 }
3207 
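/*
 * Illustrative sketch (hypothetical client code): the allocation above
 * pairs with hermon_ci_free_io_mem() below, normally reached via the
 * IBTF wrappers:
 *
 *	caddr_t			kaddr;
 *	ibt_mem_alloc_hdl_t	mem_hdl;
 *
 *	if (ibt_alloc_io_mem(hca_hdl, size, IBT_MR_SLEEP, &kaddr,
 *	    &mem_hdl) == IBT_SUCCESS) {
 *		... use kaddr as a DMA-able buffer ...
 *		(void) ibt_free_io_mem(hca_hdl, mem_hdl);
 *	}
 */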
3208 
3209 /*
3210  * hermon_ci_free_io_mem()
3211  * Unbind handle and free the memory
3212  */
3213 static ibt_status_t
3214 hermon_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
3215 {
3216 	/* Check for valid HCA handle */
3217 	if (hca == NULL) {
3218 		return (IBT_HCA_HDL_INVALID);
3219 	}
3220 
3221 	/* Check for valid mem_alloc_hdl handle pointer */
3222 	if (mem_alloc_hdl == NULL) {
3223 		return (IBT_MEM_ALLOC_HDL_INVALID);
3224 	}
3225 
3226 	/* Unbind the handles and free the memory */
3227 	(void) ddi_dma_unbind_handle(mem_alloc_hdl->ibc_dma_hdl);
3228 	ddi_dma_mem_free(&mem_alloc_hdl->ibc_acc_hdl);
3229 	ddi_dma_free_handle(&mem_alloc_hdl->ibc_dma_hdl);
3230 	kmem_free(mem_alloc_hdl, sizeof (*mem_alloc_hdl));
3231 
3232 	return (IBT_SUCCESS);
3233 }
3234