/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * hermon_ci.c
 *    Hermon Channel Interface (CI) Routines
 *
 *    Implements all the routines necessary to interface with the IBTF.
 *    Pointers to all of these functions are passed to the IBTF at attach()
 *    time in the ibc_operations_t structure.  These functions include all
 *    of the necessary routines to implement the required InfiniBand "verbs"
 *    and additional IBTF-specific interfaces.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <sys/ib/adapters/hermon/hermon.h>

extern uint32_t hermon_kernel_data_ro;
extern uint32_t hermon_user_data_ro;

/* HCA and port related operations */
static ibt_status_t hermon_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
    ibt_hca_portinfo_t *);
static ibt_status_t hermon_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
    ibt_port_modify_flags_t, uint8_t);
static ibt_status_t hermon_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);

/* Protection Domains */
static ibt_status_t hermon_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
    ibc_pd_hdl_t *);
static ibt_status_t hermon_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);

/* Reliable Datagram Domains */
static ibt_status_t hermon_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
    ibc_rdd_hdl_t *);
static ibt_status_t hermon_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);

/* Address Handles */
static ibt_status_t hermon_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
    ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
static ibt_status_t hermon_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
static ibt_status_t hermon_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibc_pd_hdl_t *, ibt_adds_vect_t *);
static ibt_status_t hermon_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibt_adds_vect_t *);

/* Queue Pairs */
static ibt_status_t hermon_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
    ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
    ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
    ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
    ibt_chan_sizes_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
    ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
    ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
static ibt_status_t hermon_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
static ibt_status_t hermon_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_qp_query_attr_t *);
static ibt_status_t hermon_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);

/* Completion Queues */
static ibt_status_t hermon_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
    ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
static ibt_status_t hermon_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
static ibt_status_t hermon_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t *, uint_t *, uint_t *, ibt_cq_handler_id_t *);
static ibt_status_t hermon_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t *);
static ibt_status_t hermon_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t, ibt_cq_handler_id_t);
static ibt_status_t hermon_ci_alloc_cq_sched(ibc_hca_hdl_t,
    ibt_cq_sched_flags_t, ibc_cq_handler_attr_t *);
static ibt_status_t hermon_ci_free_cq_sched(ibc_hca_hdl_t, ibt_cq_handler_id_t);

/* EE Contexts */
static ibt_status_t hermon_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
    ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
static ibt_status_t hermon_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
static ibt_status_t hermon_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_eec_query_attr_t *);
static ibt_status_t hermon_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_cep_modify_flags_t, ibt_eec_info_t *);

/* Memory Registration */
static ibt_status_t hermon_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_shared_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
    ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
static ibt_status_t hermon_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibt_mr_query_attr_t *);
static ibt_status_t hermon_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);

/* Memory Windows */
static ibt_status_t hermon_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
static ibt_status_t hermon_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
static ibt_status_t hermon_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
    ibt_mw_query_attr_t *);

/* Multicast Groups */
static ibt_status_t hermon_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);
static ibt_status_t hermon_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);

/* Work Request and Completion Processing */
static ibt_status_t hermon_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_send_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_wc_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_cq_notify_flags_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* Shared Receive Queues */
static ibt_status_t hermon_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
    ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
    ibt_srq_sizes_t *);
static ibt_status_t hermon_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
static ibt_status_t hermon_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
static ibt_status_t hermon_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);

/* Address translation */
static ibt_status_t hermon_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
    void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
static ibt_status_t hermon_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
    ibt_all_wr_t *, ibc_mi_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);

/* Allocate L_Key */
static ibt_status_t hermon_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);

/* Physical Register Memory Region */
static ibt_status_t hermon_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
static ibt_status_t hermon_ci_reregister_physical_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_pmr_desc_t *);

/* Mellanox FMR */
static ibt_status_t hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca,
    ibc_pd_hdl_t pd, ibt_fmr_pool_attr_t *fmr_params,
    ibc_fmr_pool_hdl_t *fmr_pool);
static ibt_status_t hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
static ibt_status_t hermon_ci_deregister_fmr(ibc_hca_hdl_t hca,
    ibc_mr_hdl_t mr);

/* Memory Allocation/Deallocation */
static ibt_status_t hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size,
    ibt_mr_flags_t mr_flag, caddr_t *kaddrp,
    ibc_mem_alloc_hdl_t *mem_alloc_hdl_p);
static ibt_status_t hermon_ci_free_io_mem(ibc_hca_hdl_t hca,
    ibc_mem_alloc_hdl_t mem_alloc_hdl);

/*
 * This ibc_operations_t structure includes pointers to all the entry points
 * provided by the Hermon driver.  This structure is passed to the IBTF at
 * driver attach time, using the ibc_attach() call.
 */
ibc_operations_t hermon_ibc_ops = {
	/* HCA and port related operations */
	hermon_ci_query_hca_ports,
	hermon_ci_modify_ports,
	hermon_ci_modify_system_image,

	/* Protection Domains */
	hermon_ci_alloc_pd,
	hermon_ci_free_pd,

	/* Reliable Datagram Domains */
	hermon_ci_alloc_rdd,
	hermon_ci_free_rdd,

	/* Address Handles */
	hermon_ci_alloc_ah,
	hermon_ci_free_ah,
	hermon_ci_query_ah,
	hermon_ci_modify_ah,

	/* Queue Pairs */
	hermon_ci_alloc_qp,
	hermon_ci_alloc_special_qp,
	hermon_ci_alloc_qp_range,
	hermon_ci_free_qp,
	hermon_ci_release_qpn,
	hermon_ci_query_qp,
	hermon_ci_modify_qp,

	/* Completion Queues */
	hermon_ci_alloc_cq,
	hermon_ci_free_cq,
	hermon_ci_query_cq,
	hermon_ci_resize_cq,
	hermon_ci_modify_cq,
	hermon_ci_alloc_cq_sched,
	hermon_ci_free_cq_sched,

	/* EE Contexts */
	hermon_ci_alloc_eec,
	hermon_ci_free_eec,
	hermon_ci_query_eec,
	hermon_ci_modify_eec,

	/* Memory Registration */
	hermon_ci_register_mr,
	hermon_ci_register_buf,
	hermon_ci_register_shared_mr,
	hermon_ci_deregister_mr,
	hermon_ci_query_mr,
	hermon_ci_reregister_mr,
	hermon_ci_reregister_buf,
	hermon_ci_sync_mr,

	/* Memory Windows */
	hermon_ci_alloc_mw,
	hermon_ci_free_mw,
	hermon_ci_query_mw,

	/* Multicast Groups */
	hermon_ci_attach_mcg,
	hermon_ci_detach_mcg,

	/* Work Request and Completion Processing */
	hermon_ci_post_send,
	hermon_ci_post_recv,
	hermon_ci_poll_cq,
	hermon_ci_notify_cq,

	/* CI Object Mapping Data */
	hermon_ci_ci_data_in,
	hermon_ci_ci_data_out,

	/* Shared Receive Queue */
	hermon_ci_alloc_srq,
	hermon_ci_free_srq,
	hermon_ci_query_srq,
	hermon_ci_modify_srq,
	hermon_ci_post_srq,

	/* Address translation */
	hermon_ci_map_mem_area,
	hermon_ci_unmap_mem_area,
	hermon_ci_map_mem_iov,
	hermon_ci_unmap_mem_iov,

	/* Allocate L_key */
	hermon_ci_alloc_lkey,

	/* Physical Register Memory Region */
	hermon_ci_register_physical_mr,
	hermon_ci_reregister_physical_mr,

	/* Mellanox FMR */
	hermon_ci_create_fmr_pool,
	hermon_ci_destroy_fmr_pool,
	hermon_ci_flush_fmr_pool,
	hermon_ci_register_physical_fmr,
	hermon_ci_deregister_fmr,

	/* Memory allocation */
	hermon_ci_alloc_io_mem,
	hermon_ci_free_io_mem,
};
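
/*
 * For illustration only (not part of this file's logic): a minimal sketch
 * of how a CI driver typically hands an operations vector like the one
 * above to the IBTF from its attach(9E) path via ibc_attach().  The
 * ibc_hca_info_t field names and the IBC_SUCCESS/IBCI version values used
 * here are assumptions for the sketch; the authoritative Hermon setup
 * lives in hermon.c, not here.
 *
 *	ibc_hca_info_t	hca_info;
 *
 *	hca_info.hca_ci_vers = IBCI_V4;			(assumed version)
 *	hca_info.hca_ops     = &hermon_ibc_ops;		(this vector)
 *	hca_info.hca_attr    = &hca_attr;		(hypothetical)
 *	if (ibc_attach(&state->hs_ibtfpriv, &hca_info) != IBC_SUCCESS)
 *		... unwind and fail the attach ...
 */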


/*
 * hermon_ci_query_hca_ports()
 *    Returns HCA port attributes for either one or all of the HCA's ports.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
    ibt_hca_portinfo_t *info_p)
{
	hermon_state_t	*state;
	uint_t		start, end, port;
	int		status, indx;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/*
	 * If the specified port is zero, then we are supposed to query all
	 * ports.  Otherwise, we query only the port number specified.
	 * Set up the start and end port numbers as appropriate for the loop
	 * below.  Note:  The first Hermon port is port number one (1).
	 */
	if (query_port == 0) {
		start = 1;
		end = start + (state->hs_cfg_profile->cp_num_ports - 1);
	} else {
		end = start = query_port;
	}

	/* Query the port(s) */
	for (port = start, indx = 0; port <= end; port++, indx++) {
		status = hermon_port_query(state, port, &info_p[indx]);
		if (status != DDI_SUCCESS) {
			return (status);
		}
	}
	return (IBT_SUCCESS);
}
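
/*
 * A worked example of the query_port convention above (illustrative only;
 * the caller and array sizes are hypothetical).  On a two-port HCA the
 * caller must size info_p to match the request:
 *
 *	ibt_hca_portinfo_t	info[2];
 *
 *	(void) hermon_ci_query_hca_ports(hca, 0, info);	   ports 1 and 2,
 *							   in info[0..1]
 *	(void) hermon_ci_query_hca_ports(hca, 2, info);	   port 2 only,
 *							   in info[0]
 */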


/*
 * hermon_ci_modify_ports()
 *    Modify HCA port attributes
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
    ibt_port_modify_flags_t flags, uint8_t init_type)
{
	hermon_state_t	*state;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Modify the port(s) */
	status = hermon_port_modify(state, port, flags, init_type);
	return (status);
}

/*
 * hermon_ci_modify_system_image()
 *    Modify the System Image GUID
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support modification of the System
	 * Image GUID.  Hermon is only capable of modifying this parameter
	 * once (during driver initialization).
	 */
	return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_alloc_pd()
 *    Allocate a Protection Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	int		status;

	ASSERT(pd_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the PD */
	status = hermon_pd_alloc(state, &pdhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_pd()
 *    Free a Protection Domain
 *    Context: Can be called only from user or kernel context
 */
static ibt_status_t
hermon_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
{
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Free the PD */
	status = hermon_pd_free(state, &pdhdl);
	return (status);
}


/*
 * hermon_ci_alloc_rdd()
 *    Allocate a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
    ibc_rdd_hdl_t *rdd_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_alloc_ah()
 *    Allocate an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
    ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	hermon_pdhdl_t	pdhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Allocate the AH */
	status = hermon_ah_alloc(state, pdhdl, attr_p, &ahhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon AH handle */
	*ah_p = (ibc_ah_hdl_t)ahhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_ah()
 *    Free an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Free the AH */
	status = hermon_ah_free(state, &ahhdl, HERMON_NOSLEEP);

	return (status);
}


/*
 * hermon_ci_query_ah()
 *    Return the Address Vector information for a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
    ibt_adds_vect_t *attr_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	hermon_pdhdl_t	pdhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Query the AH */
	status = hermon_ah_query(state, ahhdl, &pdhdl, attr_p);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_modify_ah()
 *    Modify the Address Vector information of a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Modify the AH */
	status = hermon_ah_modify(state, ahhdl, attr_p);

	return (status);
}


/*
 * hermon_ci_alloc_qp()
 *    Allocate a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
    ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
    ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
	hermon_state_t		*state;
	hermon_qp_info_t	qpinfo;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the QP */
	qpinfo.qpi_attrp	= attr_p;
	qpinfo.qpi_type		= type;
	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
	qpinfo.qpi_queueszp	= queue_sizes_p;
	qpinfo.qpi_qpn		= qpn;
	status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_alloc_special_qp()
 *    Allocate a Special Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
    ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_qp_hdl_t *qp_p)
{
	hermon_state_t		*state;
	hermon_qp_info_t	qpinfo;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the Special QP */
	qpinfo.qpi_attrp	= attr_p;
	qpinfo.qpi_type		= type;
	qpinfo.qpi_port		= port;
	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
	qpinfo.qpi_queueszp	= queue_sizes_p;
	status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	/* Return the Hermon QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	return (IBT_SUCCESS);
}

/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
    ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_free_qp()
 *    Free a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Free the QP */
	status = hermon_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
	    HERMON_NOSLEEP);

	return (status);
}


/*
 * hermon_ci_release_qpn()
 *    Release a Queue Pair Number (QPN)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
{
	hermon_state_t		*state;
	hermon_qpn_entry_t	*entry;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qpnh == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	entry = (hermon_qpn_entry_t *)qpnh;

	/* Release the QP number */
	hermon_qp_release_qpn(state, entry, HERMON_QPN_RELEASE);

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_query_qp()
 *    Query a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_qp_query_attr_t *attr_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Query the QP */
	status = hermon_qp_query(state, qphdl, attr_p);
	return (status);
}


/*
 * hermon_ci_modify_qp()
 *    Modify a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
    ibt_queue_sizes_t *actual_sz)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Modify the QP */
	status = hermon_qp_modify(state, qphdl, flags, info_p, actual_sz);
	return (status);
}


/*
 * hermon_ci_alloc_cq()
 *    Allocate a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}
	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the CQ */
	status = hermon_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
	    &cqhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon CQ handle */
	*cq_p = (ibc_cq_hdl_t)cqhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_cq()
 *    Free a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Free the CQ */
	status = hermon_cq_free(state, &cqhdl, HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_cq()
 *    Return the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
    uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
{
	hermon_cqhdl_t	cqhdl;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the CQ handle */
	cqhdl = (hermon_cqhdl_t)cq;

	/* Query the current CQ size */
	*entries_p = cqhdl->cq_bufsz;
	*count_p = cqhdl->cq_intmod_count;
	*usec_p = cqhdl->cq_intmod_usec;
	*hid_p = 0;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_resize_cq()
 *    Change the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
    uint_t *actual_size)
{
	hermon_state_t		*state;
	hermon_cqhdl_t		cqhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Resize the CQ */
	status = hermon_cq_resize(state, cqhdl, size, actual_size,
	    HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	return (IBT_SUCCESS);
}

/*
 * hermon_ci_modify_cq()
 *    Change the interrupt moderation values of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t count,
    uint_t usec, ibt_cq_handler_id_t hid)
{
	hermon_state_t		*state;
	hermon_cqhdl_t		cqhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Modify the CQ */
	status = hermon_cq_modify(state, cqhdl, count, usec, hid,
	    HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_alloc_cq_sched()
 *    Reserve a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_flags_t flags,
    ibc_cq_handler_attr_t *handler_attr_p)
{
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/*
	 * This is an unsupported interface for the Hermon driver.  Hermon
	 * does not support CQ scheduling classes.
	 */

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p))
	handler_attr_p->h_id = NULL;
	handler_attr_p->h_pri = 0;
	handler_attr_p->h_bind = NULL;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p))
	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_cq_sched()
 *    Free a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibt_cq_handler_id_t handler_id)
{
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/*
	 * This is an unsupported interface for the Hermon driver.  Hermon
	 * does not support CQ scheduling classes.  Returning a NULL
	 * hint is the way to treat this as unsupported.  We check for
	 * the expected NULL, but do not fail in any case.
	 */
	if (handler_id != NULL) {
		cmn_err(CE_NOTE, "hermon_ci_free_cq_sched: unexpected "
		    "non-NULL handler_id\n");
	}
	return (IBT_SUCCESS);
}


/*
 * hermon_ci_alloc_eec()
 *    Allocate an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
    ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_free_eec()
 *    Free an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_query_eec()
 *    Query an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_eec_query_attr_t *attr_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_modify_eec()
 *    Modify an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_register_mr()
 *    Prepare a virtually addressed Memory Region for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
	    &op, HERMON_MPT_DMPT);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * mark the region as requiring sync
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}
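
/*
 * An illustrative sketch of the access-flag rule enforced above (the
 * attribute values below are hypothetical, not taken from a real
 * consumer): remote access without Local Write is rejected.
 *
 *	mr_attr.mr_flags = IBT_MR_ENABLE_REMOTE_WRITE;
 *		rejected with IBT_MR_ACCESS_REQ_INVALID (no Local Write)
 *
 *	mr_attr.mr_flags = IBT_MR_ENABLE_LOCAL_WRITE |
 *	    IBT_MR_ENABLE_REMOTE_WRITE;
 *		accepted; mr_desc->md_rkey is also filled in because
 *		remote access was requested
 */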


/*
 * hermon_ci_register_buf()
 *    Prepare a Memory Region specified by buf structure for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
	    &mrhdl, &op, HERMON_MPT_DMPT);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * mark the region as requiring sync
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_deregister_mr()
 *    Deregister a Memory Region from an HCA translation table
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
	hermon_state_t		*state;
	hermon_mrhdl_t		mrhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;

	/*
	 * Deregister the memory region.
	 */
	status = hermon_mr_deregister(state, &mrhdl, HERMON_MR_DEREG_ALL,
	    HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_mr()
 *    Retrieve information about a specified Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibt_mr_query_attr_t *mr_attr)
{
	hermon_state_t		*state;
	hermon_mrhdl_t		mrhdl;
	int			status;

	ASSERT(mr_attr != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for MemRegion handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and MR handle */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;

	/* Query the memory region */
	status = hermon_mr_query(state, mrhdl, mr_attr);
	return (status);
}


/*
 * hermon_ci_register_shared_mr()
 *    Create a shared memory region matching an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}
	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and handles */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;
	mrhdl = (hermon_mrhdl_t)mr;

	/* Register the shared memory region */
	status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
	    &mrhdl_new);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If the shared region is mapped for streaming (i.e. noncoherent),
	 * then mark the region as requiring sync
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_reregister_mr()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
    ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_attr != NULL);
	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Reregister the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * mark the region as requiring sync
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_reregister_buf()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Reregister the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * mark the region as requiring sync
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_sync_mr()
 *    Synchronize access to a Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
{
	hermon_state_t		*state;
	int			status;

	ASSERT(mr_segs != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Sync the memory region */
	status = hermon_mr_sync(state, mr_segs, num_segs);
	return (status);
}
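
/*
 * Illustrative only: a consumer that registered a noncoherent (streaming)
 * region, and therefore saw md_sync_required == B_TRUE in the returned
 * ibt_mr_desc_t, would be expected to bracket its DMA with sync calls
 * along these lines.  The segment setup below is a sketch; the ms_*
 * field names and IBT_SYNC_READ flag are assumptions about the IBTF
 * definitions, not taken from this driver:
 *
 *	ibt_mr_sync_t	seg;
 *
 *	seg.ms_handle = mr_hdl;		the region's CI MR handle
 *	seg.ms_vaddr  = vaddr;
 *	seg.ms_len    = len;
 *	seg.ms_flags  = IBT_SYNC_READ;	before reading fresh DMA data
 *	status = hermon_ci_sync_mr(hca, &seg, 1);
 */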


/*
 * hermon_ci_alloc_mw()
 *    Allocate a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
{
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mwhdl_t		mwhdl;
	int			status;

	ASSERT(mw_p != NULL);
	ASSERT(rkey_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Allocate the memory window */
	status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))

	/* Return the MW handle and RKey */
	*mw_p = (ibc_mw_hdl_t)mwhdl;
	*rkey_p = mwhdl->mr_rkey;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_mw()
 *    Free a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
{
	hermon_state_t		*state;
	hermon_mwhdl_t		mwhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid MW handle */
	if (mw == NULL) {
		return (IBT_MW_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and MW handle */
	state = (hermon_state_t *)hca;
	mwhdl = (hermon_mwhdl_t)mw;

	/* Free the memory window */
	status = hermon_mw_free(state, &mwhdl, HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_mw()
 *    Return the attributes of the specified Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
    ibt_mw_query_attr_t *mw_attr_p)
{
	hermon_mwhdl_t		mwhdl;

	ASSERT(mw_attr_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid MemWin handle */
	if (mw == NULL) {
		return (IBT_MW_HDL_INVALID);
	}

	/* Query the memory window pointer and fill in the return values */
	mwhdl = (hermon_mwhdl_t)mw;
	mutex_enter(&mwhdl->mr_lock);
	mw_attr_p->mw_pd   = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
	mw_attr_p->mw_rkey = mwhdl->mr_rkey;
	mutex_exit(&mwhdl->mr_lock);

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_attach_mcg()
 *    Attach a Queue Pair to a Multicast Group
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
    ib_lid_t lid)
{
	hermon_state_t		*state;
	hermon_qphdl_t		qphdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handles */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Attach the QP to the multicast group */
	status = hermon_mcg_attach(state, qphdl, gid, lid);
	return (status);
}


/*
 * hermon_ci_detach_mcg()
 *    Detach a Queue Pair from a Multicast Group
1835  *    Context: Can be called only from user or kernel context.
1836  */
1837 static ibt_status_t
1838 hermon_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1839     ib_lid_t lid)
1840 {
1841 	hermon_state_t		*state;
1842 	hermon_qphdl_t		qphdl;
1843 	int			status;
1844 
1845 	/* Check for valid HCA handle */
1846 	if (hca == NULL) {
1847 		return (IBT_HCA_HDL_INVALID);
1848 	}
1849 
1850 	/* Check for valid QP handle pointer */
1851 	if (qp == NULL) {
1852 		return (IBT_QP_HDL_INVALID);
1853 	}
1854 
1855 	/* Grab the Hermon softstate pointer and QP handle */
1856 	state = (hermon_state_t *)hca;
1857 	qphdl = (hermon_qphdl_t)qp;
1858 
1859 	/* Detach the QP from the multicast group */
1860 	status = hermon_mcg_detach(state, qphdl, gid, lid);
1861 	return (status);
1862 }
1863 
1864 
1865 /*
1866  * hermon_ci_post_send()
1867  *    Post send work requests to the send queue on the specified QP
1868  *    Context: Can be called from interrupt or base context.
1869  */
1870 static ibt_status_t
1871 hermon_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
1872     uint_t num_wr, uint_t *num_posted_p)
1873 {
1874 	hermon_state_t		*state;
1875 	hermon_qphdl_t		qphdl;
1876 	int			status;
1877 
1878 	ASSERT(wr_p != NULL);
1879 	ASSERT(num_wr != 0);
1880 
1881 	/* Check for valid HCA handle */
1882 	if (hca == NULL) {
1883 		return (IBT_HCA_HDL_INVALID);
1884 	}
1885 
1886 	/* Check for valid QP handle pointer */
1887 	if (qp == NULL) {
1888 		return (IBT_QP_HDL_INVALID);
1889 	}
1890 
1891 	/* Grab the Hermon softstate pointer and QP handle */
1892 	state = (hermon_state_t *)hca;
1893 	qphdl = (hermon_qphdl_t)qp;
1894 
1895 	/* Post the send WQEs */
1896 	status = hermon_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
1897 	return (status);
1898 }
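
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * a minimal example of how an IBTF kernel client might build the
 * ibt_send_wr_t that eventually reaches hermon_ci_post_send() through
 * the ibc_operations_t vector.  The names ibt_post_send(), IBT_WRC_SEND,
 * IBT_WR_SEND_SIGNAL and IBT_RC_SRV are assumed from the IBTF headers;
 * ds_va/ds_key/ds_len and wr_nds/wr_sgl appear elsewhere in this file.
 */
#if 0	/* example only */
static ibt_status_t
example_post_one_send(ibt_channel_hdl_t chan, ib_vaddr_t va, ibt_lkey_t lkey,
    ib_msglen_t len)
{
	ibt_send_wr_t	wr;
	ibt_wr_ds_t	sge;
	uint_t		posted;

	sge.ds_va = va;			/* registered buffer address */
	sge.ds_key = lkey;		/* L_Key from the registration */
	sge.ds_len = len;

	bzero(&wr, sizeof (wr));
	wr.wr_id = 1;			/* client-chosen completion tag */
	wr.wr_flags = IBT_WR_SEND_SIGNAL;
	wr.wr_trans = IBT_RC_SRV;
	wr.wr_opcode = IBT_WRC_SEND;
	wr.wr_nds = 1;
	wr.wr_sgl = &sge;

	/* On partial failure, "posted" reports how many WRs were queued */
	return (ibt_post_send(chan, &wr, 1, &posted));
}
#endif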
1899 
1900 
1901 /*
1902  * hermon_ci_post_recv()
1903  *    Post receive work requests to the receive queue on the specified QP
1904  *    Context: Can be called from interrupt or base context.
1905  */
1906 static ibt_status_t
1907 hermon_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
1908     uint_t num_wr, uint_t *num_posted_p)
1909 {
1910 	hermon_state_t		*state;
1911 	hermon_qphdl_t		qphdl;
1912 	int			status;
1913 
1914 	ASSERT(wr_p != NULL);
1915 	ASSERT(num_wr != 0);
1916 
1917 	/* Check for valid HCA handle */
1918 	if (hca == NULL) {
1919 		return (IBT_HCA_HDL_INVALID);
1920 	}
1921 
1922 	/* Check for valid QP handle pointer */
1923 	if (qp == NULL) {
1924 		return (IBT_QP_HDL_INVALID);
1925 	}
1926 
1927 	state = (hermon_state_t *)hca;
1928 	qphdl = (hermon_qphdl_t)qp;
1929 	/* Post the receive WQEs */
1930 	status = hermon_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
1931 	return (status);
1932 }
1933 
1934 
1935 /*
1936  * hermon_ci_poll_cq()
1937  *    Poll for a work request completion
1938  *    Context: Can be called from interrupt or base context.
1939  */
1940 static ibt_status_t
1941 hermon_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
1942     uint_t num_wc, uint_t *num_polled)
1943 {
1944 	hermon_state_t		*state;
1945 	hermon_cqhdl_t		cqhdl;
1946 	int			status;
1947 
1948 	ASSERT(wc_p != NULL);
1949 
1950 	/* Check for valid HCA handle */
1951 	if (hca == NULL) {
1952 		return (IBT_HCA_HDL_INVALID);
1953 	}
1954 
1955 	/* Check for valid CQ handle pointer */
1956 	if (cq == NULL) {
1957 		return (IBT_CQ_HDL_INVALID);
1958 	}
1959 
1960 	/* Check for valid num_wc field */
1961 	if (num_wc == 0) {
1962 		return (IBT_INVALID_PARAM);
1963 	}
1964 
1965 	/* Grab the Hermon softstate pointer and CQ handle */
1966 	state = (hermon_state_t *)hca;
1967 	cqhdl = (hermon_cqhdl_t)cq;
1968 
1969 	/* Poll for work request completions */
1970 	status = hermon_cq_poll(state, cqhdl, wc_p, num_wc, num_polled);
1971 	return (status);
1972 }
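
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * a typical consumer loop over this entry point, reached via
 * ibt_poll_cq().  The ibt_poll_cq() signature and its non-IBT_SUCCESS
 * return on an empty CQ are assumed from the IBTF headers.
 */
#if 0	/* example only */
static void
example_drain_cq(ibt_cq_hdl_t cq)
{
	ibt_wc_t	wc[8];
	uint_t		polled, i;

	/* Loop until the poll reports the CQ is empty */
	while (ibt_poll_cq(cq, wc, 8, &polled) == IBT_SUCCESS) {
		for (i = 0; i < polled; i++) {
			/* wc[i].wc_id identifies the completed WR */
		}
	}
}
#endif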
1973 
1974 
1975 /*
1976  * hermon_ci_notify_cq()
1977  *    Enable notification events on the specified CQ
1978  *    Context: Can be called from interrupt or base context.
1979  */
1980 static ibt_status_t
1981 hermon_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
1982     ibt_cq_notify_flags_t flags)
1983 {
1984 	hermon_state_t		*state;
1985 	hermon_cqhdl_t		cqhdl;
1986 	int			status;
1987 
1988 	/* Check for valid HCA handle */
1989 	if (hca == NULL) {
1990 		return (IBT_HCA_HDL_INVALID);
1991 	}
1992 
1993 	/* Check for valid CQ handle pointer */
1994 	if (cq_hdl == NULL) {
1995 		return (IBT_CQ_HDL_INVALID);
1996 	}
1997 
1998 	/* Grab the Hermon softstate pointer and CQ handle */
1999 	state = (hermon_state_t *)hca;
2000 	cqhdl = (hermon_cqhdl_t)cq_hdl;
2001 
2002 	/* Enable the CQ notification */
2003 	status = hermon_cq_notify(state, cqhdl, flags);
2004 	return (status);
2005 }
2006 
2007 /*
2008  * hermon_ci_ci_data_in()
2009  *    Exchange CI-specific data.
2010  *    Context: Can be called only from user or kernel context.
2011  */
2012 static ibt_status_t
2013 hermon_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2014     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2015     size_t data_sz)
2016 {
2017 	hermon_state_t		*state;
2018 	int			status;
2019 
2020 	/* Check for valid HCA handle */
2021 	if (hca == NULL) {
2022 		return (IBT_HCA_HDL_INVALID);
2023 	}
2024 
2025 	/* Grab the Hermon softstate pointer */
2026 	state = (hermon_state_t *)hca;
2027 
2028 	/* Get the Hermon userland mapping information */
2029 	status = hermon_umap_ci_data_in(state, flags, object,
2030 	    ibc_object_handle, data_p, data_sz);
2031 	return (status);
2032 }
2033 
2034 /*
2035  * hermon_ci_ci_data_out()
2036  *    Exchange CI-specific data.
2037  *    Context: Can be called only from user or kernel context.
2038  */
2039 static ibt_status_t
2040 hermon_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2041     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2042     size_t data_sz)
2043 {
2044 	hermon_state_t		*state;
2045 	int			status;
2046 
2047 	/* Check for valid HCA handle */
2048 	if (hca == NULL) {
2049 		return (IBT_HCA_HDL_INVALID);
2050 	}
2051 
2052 	/* Grab the Hermon softstate pointer */
2053 	state = (hermon_state_t *)hca;
2054 
2055 	/* Get the Hermon userland mapping information */
2056 	status = hermon_umap_ci_data_out(state, flags, object,
2057 	    ibc_object_handle, data_p, data_sz);
2058 	return (status);
2059 }
2060 
2061 
2062 /*
2063  * hermon_ci_alloc_srq()
2064  *    Allocate a Shared Receive Queue (SRQ)
2065  *    Context: Can be called only from user or kernel context
2066  */
2067 static ibt_status_t
2068 hermon_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
2069     ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
2070     ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
2071 {
2072 	hermon_state_t		*state;
2073 	hermon_pdhdl_t		pdhdl;
2074 	hermon_srqhdl_t		srqhdl;
2075 	hermon_srq_info_t	srqinfo;
2076 	int			status;
2077 
2078 	/* Check for valid HCA handle */
2079 	if (hca == NULL) {
2080 		return (IBT_HCA_HDL_INVALID);
2081 	}
2082 
2083 	state = (hermon_state_t *)hca;
2084 
2085 	/* Check for valid PD handle pointer */
2086 	if (pd == NULL) {
2087 		return (IBT_PD_HDL_INVALID);
2088 	}
2089 
2090 	pdhdl = (hermon_pdhdl_t)pd;
2091 
2092 	srqinfo.srqi_ibt_srqhdl = ibt_srq;
2093 	srqinfo.srqi_pd		= pdhdl;
2094 	srqinfo.srqi_sizes	= sizes;
2095 	srqinfo.srqi_real_sizes	= ret_sizes_p;
2096 	srqinfo.srqi_srqhdl	= &srqhdl;
2097 	srqinfo.srqi_flags	= flags;
2098 
2099 	status = hermon_srq_alloc(state, &srqinfo, HERMON_NOSLEEP);
2100 	if (status != DDI_SUCCESS) {
2101 		return (status);
2102 	}
2103 
2104 	*ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
2105 
2106 	return (IBT_SUCCESS);
2107 }
2108 
2109 /*
2110  * hermon_ci_free_srq()
2111  *    Free a Shared Receive Queue (SRQ)
2112  *    Context: Can be called only from user or kernel context
2113  */
2114 static ibt_status_t
2115 hermon_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
2116 {
2117 	hermon_state_t	*state;
2118 	hermon_srqhdl_t	srqhdl;
2119 	int		status;
2120 
2121 	/* Check for valid HCA handle */
2122 	if (hca == NULL) {
2123 		return (IBT_HCA_HDL_INVALID);
2124 	}
2125 
2126 	state = (hermon_state_t *)hca;
2127 
2128 	/* Check for valid SRQ handle pointer */
2129 	if (srq == NULL) {
2130 		return (IBT_SRQ_HDL_INVALID);
2131 	}
2132 
2133 	srqhdl = (hermon_srqhdl_t)srq;
2134 
2135 	/* Free the SRQ */
2136 	status = hermon_srq_free(state, &srqhdl, HERMON_NOSLEEP);
2137 	return (status);
2138 }
2139 
2140 /*
2141  * hermon_ci_query_srq()
2142  *    Query properties of a Shared Receive Queue (SRQ)
2143  *    Context: Can be called from interrupt or base context.
2144  */
2145 static ibt_status_t
2146 hermon_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
2147     ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
2148 {
2149 	hermon_srqhdl_t	srqhdl;
2150 
2151 	/* Check for valid HCA handle */
2152 	if (hca == NULL) {
2153 		return (IBT_HCA_HDL_INVALID);
2154 	}
2155 
2156 	/* Check for valid SRQ handle pointer */
2157 	if (srq == NULL) {
2158 		return (IBT_SRQ_HDL_INVALID);
2159 	}
2160 
2161 	srqhdl = (hermon_srqhdl_t)srq;
2162 
2163 	mutex_enter(&srqhdl->srq_lock);
2164 	if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
2165 		mutex_exit(&srqhdl->srq_lock);
2166 		return (IBT_SRQ_ERROR_STATE);
2167 	}
2168 
2169 	*pd_p   = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
2170 	sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz - 1;
2171 	sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
2172 	mutex_exit(&srqhdl->srq_lock);
2173 	*limit_p  = 0;
2174 
2175 	return (IBT_SUCCESS);
2176 }
2177 
2178 /*
2179  * hermon_ci_modify_srq()
2180  *    Modify properties of a Shared Receive Queue (SRQ)
2181  *    Context: Can be called from interrupt or base context.
2182  */
2183 /* ARGSUSED */
2184 static ibt_status_t
2185 hermon_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2186     ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
2187 {
2188 	hermon_state_t	*state;
2189 	hermon_srqhdl_t	srqhdl;
2190 	uint_t		resize_supported, cur_srq_size;
2191 	int		status;
2192 
2193 	/* Check for valid HCA handle */
2194 	if (hca == NULL) {
2195 		return (IBT_HCA_HDL_INVALID);
2196 	}
2197 
2198 	state = (hermon_state_t *)hca;
2199 
2200 	/* Check for valid SRQ handle pointer */
2201 	if (srq == NULL) {
2202 		return (IBT_SRQ_HDL_INVALID);
2203 	}
2204 
2205 	srqhdl = (hermon_srqhdl_t)srq;
2206 
2207 	/*
2208 	 * Check Error State of SRQ.
2209 	 * Also, while we are holding the lock we save away the current SRQ
2210 	 * size for later use.
2211 	 */
2212 	mutex_enter(&srqhdl->srq_lock);
2213 	cur_srq_size = srqhdl->srq_wq_bufsz;
2214 	if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
2215 		mutex_exit(&srqhdl->srq_lock);
2216 		return (IBT_SRQ_ERROR_STATE);
2217 	}
2218 	mutex_exit(&srqhdl->srq_lock);
2219 
2220 	/*
2221 	 * Setting the limit watermark is not currently supported.  This is a
2222 	 * hermon hardware (firmware) limitation.  We return NOT_SUPPORTED here,
2223 	 * and have the limit code commented out for now.
2224 	 *
2225 	 * XXX If we enable the limit watermark support, we need to do checks
2226 	 * and set the 'srq->srq_wr_limit' here, instead of returning not
2227 	 * supported.  The 'hermon_srq_modify' operation below is for resizing
2228 	 * the SRQ only, the limit work should be done here.  If this is
2229 	 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
2230 	 * function should also be removed at that time.
2231 	 */
2232 	if (flags & IBT_SRQ_SET_LIMIT) {
2233 		return (IBT_NOT_SUPPORTED);
2234 	}
2235 
2236 	/*
2237 	 * Check the SET_SIZE flag.  If not set, we simply return success here.
2238 	 * However if it is set, we check if resize is supported and only then
2239 	 * do we continue on with our resize processing.
2240 	 */
2241 	if (!(flags & IBT_SRQ_SET_SIZE)) {
2242 		return (IBT_SUCCESS);
2243 	}
2244 
2245 	resize_supported = state->hs_ibtfinfo.hca_attr->hca_flags &
2246 	    IBT_HCA_RESIZE_SRQ;
2247 
2248 	if (!resize_supported) {
2249 		return (IBT_NOT_SUPPORTED);
2250 	}
2251 
2252 	/*
2253 	 * We do not support resizing an SRQ to be smaller than its current
2254 	 * size.  If a smaller (or equal) size is requested, we simply
2255 	 * return success and do nothing.
2256 	 */
2257 	if (size <= cur_srq_size) {
2258 		*ret_size_p = cur_srq_size;
2259 		return (IBT_SUCCESS);
2260 	}
2261 
2262 	status = hermon_srq_modify(state, srqhdl, size, ret_size_p,
2263 	    HERMON_NOSLEEP);
2264 	if (status != DDI_SUCCESS) {
2265 		/* Set return value to current SRQ size */
2266 		*ret_size_p = cur_srq_size;
2267 		return (status);
2268 	}
2269 
2270 	return (IBT_SUCCESS);
2271 }
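
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * given the semantics above, a client can only grow an SRQ through this
 * entry point; IBT_SRQ_SET_LIMIT fails with IBT_NOT_SUPPORTED, and a
 * shrink request succeeds without doing anything, echoing back the
 * current size.  The ibt_modify_srq() signature is assumed from the
 * IBTF headers.
 */
#if 0	/* example only */
static ibt_status_t
example_grow_srq(ibt_srq_hdl_t srq, uint_t new_size)
{
	uint_t	real_size;

	/* real_size reports the (possibly rounded-up) resulting size */
	return (ibt_modify_srq(srq, IBT_SRQ_SET_SIZE, new_size, 0,
	    &real_size));
}
#endif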
2272 
2273 /*
2274  * hermon_ci_post_srq()
2275  *    Post a Work Request to the specified Shared Receive Queue (SRQ)
2276  *    Context: Can be called from interrupt or base context.
2277  */
2278 static ibt_status_t
2279 hermon_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2280     ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2281 {
2282 	hermon_state_t	*state;
2283 	hermon_srqhdl_t	srqhdl;
2284 	int		status;
2285 
2286 	/* Check for valid HCA handle */
2287 	if (hca == NULL) {
2288 		return (IBT_HCA_HDL_INVALID);
2289 	}
2290 
2291 	state = (hermon_state_t *)hca;
2292 
2293 	/* Check for valid SRQ handle pointer */
2294 	if (srq == NULL) {
2295 		return (IBT_SRQ_HDL_INVALID);
2296 	}
2297 
2298 	srqhdl = (hermon_srqhdl_t)srq;
2299 
2300 	status = hermon_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2301 	return (status);
2302 }
2303 
2304 /* Address translation */
2305 
2306 struct ibc_ma_s {
2307 	int			h_ma_addr_list_len;
2308 	void			*h_ma_addr_list;
2309 	ddi_dma_handle_t	h_ma_dmahdl;	/* bind of client memory */
2310 	ddi_dma_handle_t	h_ma_list_hdl;	/* bind of the PBL itself */
2311 	ddi_acc_handle_t	h_ma_list_acc_hdl; /* access hdl for PBL */
2312 	size_t			h_ma_real_len;	/* size of PBL allocation */
2313 	caddr_t			h_ma_kaddr;	/* kernel VA of the PBL */
2314 	ibt_phys_addr_t		h_ma_list_cookie; /* bus address of PBL */
2315 };
2316 
2317 static ibt_status_t
2318 hermon_map_mem_area_fmr(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2319     uint_t list_len, ibt_pmr_attr_t *pmr, ibc_ma_hdl_t *ma_hdl_p)
2320 {
2321 	int			status;
2322 	ibt_status_t		ibt_status;
2323 	ibc_ma_hdl_t		ma_hdl;
2324 	ib_memlen_t		len;
2325 	ddi_dma_attr_t		dma_attr;
2326 	uint_t			cookie_cnt;
2327 	ddi_dma_cookie_t	dmacookie;
2328 	hermon_state_t		*state;
2329 	uint64_t		*kaddr;
2330 	uint64_t		addr, endaddr, pagesize;
2331 	int			i, kmflag;
2332 	int			(*callback)(caddr_t);
2333 
2334 	if ((va_attrs->va_flags & IBT_VA_BUF) == 0) {
2335 		return (IBT_NOT_SUPPORTED);	/* XXX - not yet implemented */
2336 	}
2337 
2338 	state = (hermon_state_t *)hca;
2339 	hermon_dma_attr_init(state, &dma_attr);
2340 	if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2341 		kmflag = KM_NOSLEEP;
2342 		callback = DDI_DMA_DONTWAIT;
2343 	} else {
2344 		kmflag = KM_SLEEP;
2345 		callback = DDI_DMA_SLEEP;
2346 	}
2347 
2348 	ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2349 	if (ma_hdl == NULL) {
2350 		return (IBT_INSUFF_RESOURCE);
2351 	}
2352 #ifdef	__sparc
2353 	if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2354 		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2355 
2356 	if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2357 		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2358 #endif
2359 
2360 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2361 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2362 	    callback, NULL, &ma_hdl->h_ma_dmahdl);
2363 	if (status != DDI_SUCCESS) {
2364 		kmem_free(ma_hdl, sizeof (*ma_hdl));
2365 		return (IBT_INSUFF_RESOURCE);
2366 	}
2367 	status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2368 	    va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2369 	    callback, NULL, &dmacookie, &cookie_cnt);
2370 	if (status != DDI_DMA_MAPPED) {
2371 		ibt_status = ibc_get_ci_failure(0);
2372 		goto marea_fail3;
2373 	}
2374 
2375 	ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
2376 	ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
2377 	if (ma_hdl->h_ma_kaddr == NULL) {
2378 		ibt_status = IBT_INSUFF_RESOURCE;
2379 		goto marea_fail4;
2380 	}
2381 
2382 	i = 0;
2383 	len = 0;
2384 	pagesize = PAGESIZE;
2385 	kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2386 	while (cookie_cnt-- > 0) {
2387 		addr	= dmacookie.dmac_laddress;
2388 		len	+= dmacookie.dmac_size;
2389 		endaddr	= addr + (dmacookie.dmac_size - 1);
2390 		addr	= addr & ~(pagesize - 1);
2391 		while (addr <= endaddr) {
2392 			if (i >= list_len) {
2393 				ibt_status = IBT_PBL_TOO_SMALL;
2394 				goto marea_fail5;
2395 			}
2396 			kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2397 			i++;
2398 			addr += pagesize;
2399 			if (addr == 0) {
2400 				static int do_once = 1;
2401 				_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2402 				    do_once))
2403 				if (do_once) {
2404 					do_once = 0;
2405 					cmn_err(CE_NOTE, "probable error in "
2406 					    "dma_cookie address: map_mem_area");
2407 				}
2408 				break;
2409 			}
2410 		}
2411 		if (cookie_cnt != 0)
2412 			ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2413 	}
2414 
2415 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2416 	pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
2417 	pmr->pmr_iova = va_attrs->va_vaddr;
2418 	pmr->pmr_len = len;
2419 	pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2420 	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
2421 	pmr->pmr_num_buf = i;
2422 	pmr->pmr_ma = ma_hdl;
2423 
2424 	*ma_hdl_p = ma_hdl;
2425 	return (IBT_SUCCESS);
2426 
2427 marea_fail5:
2428 	kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2429 marea_fail4:
2430 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2431 marea_fail3:
2432 	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2433 	kmem_free(ma_hdl, sizeof (*ma_hdl));
2434 	*ma_hdl_p = NULL;
2435 	return (ibt_status);
2436 }
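
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * the cookie-to-page-list expansion used in the loop above, pulled out
 * as a standalone helper.  htonll() and HERMON_MTT_ENTRY_PRESENT come
 * from the driver headers; the -1 return stands in for
 * IBT_PBL_TOO_SMALL.
 */
#if 0	/* example only */
static int
example_pbl_fill(uint64_t laddr, uint64_t size, uint64_t pagesize,
    uint64_t *pbl, uint_t list_len, uint_t *idx)
{
	uint64_t	addr = laddr & ~(pagesize - 1);
	uint64_t	endaddr = laddr + (size - 1);

	/* One entry per page the cookie touches, flagged as present */
	while (addr <= endaddr) {
		if (*idx >= list_len)
			return (-1);
		pbl[(*idx)++] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
		addr += pagesize;
		if (addr == 0)		/* wrapped: bogus cookie address */
			break;
	}
	return (0);
}
#endif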
2437 
2438 /*
2439  * hermon_ci_map_mem_area()
2440  *    Context: Can be called from interrupt or base context.
2441  *
2442  *	Creates the memory mapping suitable for a subsequent posting of an
2443  *	FRWR work request.  All the info about the memory area for the
2444  *	FRWR work request (the wr member of "union ibt_reg_req_u") is
2445  *	filled in so that the client only needs to point wr.rc.rcwr.reg_pmr
2446  *	to it and then fill in the additional information only it knows.
2447  *
2448  *	Alternatively, creates the memory mapping for FMR.
2449  */
2450 /* ARGSUSED */
2451 static ibt_status_t
2452 hermon_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2453     void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
2454     ibc_ma_hdl_t *ma_hdl_p)
2455 {
2456 	ibt_status_t		ibt_status;
2457 	int			status;
2458 	ibc_ma_hdl_t		ma_hdl;
2459 	ibt_wr_reg_pmr_t	*pmr;
2460 	ib_memlen_t		len;
2461 	ddi_dma_attr_t		dma_attr;
2462 	ddi_dma_handle_t	khdl;
2463 	uint_t			cookie_cnt;
2464 	ddi_dma_cookie_t	dmacookie, kcookie;
2465 	hermon_state_t		*state;
2466 	uint64_t		*kaddr;
2467 	uint64_t		addr, endaddr, pagesize, kcookie_paddr;
2468 	int			i, j, kmflag;
2469 	int			(*callback)(caddr_t);
2470 
2471 	if (va_attrs->va_flags & (IBT_VA_FMR | IBT_VA_REG_FN)) {
2472 		/* delegate FMR and Physical Register to other function */
2473 		return (hermon_map_mem_area_fmr(hca, va_attrs, list_len,
2474 		    &reg_req->fn_arg, ma_hdl_p));
2475 	}
2476 
2477 	/* FRWR */
2478 
2479 	state = (hermon_state_t *)hca;
2480 	hermon_dma_attr_init(state, &dma_attr);
2481 #ifdef	__sparc
2482 	if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2483 		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2484 
2485 	if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2486 		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2487 #endif
2488 	if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2489 		kmflag = KM_NOSLEEP;
2490 		callback = DDI_DMA_DONTWAIT;
2491 	} else {
2492 		kmflag = KM_SLEEP;
2493 		callback = DDI_DMA_SLEEP;
2494 	}
2495 
2496 	ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2497 	if (ma_hdl == NULL) {
2498 		return (IBT_INSUFF_RESOURCE);
2499 	}
2500 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2501 
2502 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2503 	    callback, NULL, &ma_hdl->h_ma_dmahdl);
2504 	if (status != DDI_SUCCESS) {
2505 		kmem_free(ma_hdl, sizeof (*ma_hdl));
2506 		ibt_status = IBT_INSUFF_RESOURCE;
2507 		goto marea_fail0;
2508 	}
2509 	dma_attr.dma_attr_align = 64;	/* as per PRM */
2510 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2511 	    callback, NULL, &ma_hdl->h_ma_list_hdl);
2512 	if (status != DDI_SUCCESS) {
2513 		ibt_status = IBT_INSUFF_RESOURCE;
2514 		goto marea_fail1;
2515 	}
2516 	/*
2517 	 * Entries in the list in the last slot on each page cannot be used,
2518 	 * so 1 extra ibt_phys_addr_t is allocated per page.  We add 1 more
2519 	 * to deal with the possibility that an allocation of less than one
2520 	 * page nonetheless crosses a page boundary.
2521 	 */
2522 	status = ddi_dma_mem_alloc(ma_hdl->h_ma_list_hdl, (list_len + 1 +
2523 	    list_len / (HERMON_PAGESIZE / sizeof (ibt_phys_addr_t))) *
2524 	    sizeof (ibt_phys_addr_t),
2525 	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, callback, NULL,
2526 	    &ma_hdl->h_ma_kaddr, &ma_hdl->h_ma_real_len,
2527 	    &ma_hdl->h_ma_list_acc_hdl);
2528 	if (status != DDI_SUCCESS) {
2529 		ibt_status = IBT_INSUFF_RESOURCE;
2530 		goto marea_fail2;
2531 	}
2532 	status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_list_hdl, NULL,
2533 	    ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len, DDI_DMA_RDWR |
2534 	    DDI_DMA_CONSISTENT, callback, NULL,
2535 	    &kcookie, &cookie_cnt);
2536 	if (status != DDI_SUCCESS) {
2537 		ibt_status = IBT_INSUFF_RESOURCE;
2538 		goto marea_fail3;
2539 	}
2540 	if ((kcookie.dmac_laddress & 0x3f) != 0) {
2541 		cmn_err(CE_NOTE, "64-byte alignment assumption wrong");
2542 		ibt_status = ibc_get_ci_failure(0);
2543 		goto marea_fail4;
2544 	}
2545 	ma_hdl->h_ma_list_cookie.p_laddr = kcookie.dmac_laddress;
2546 
2547 	if (va_attrs->va_flags & IBT_VA_BUF) {
2548 		status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2549 		    va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2550 		    callback, NULL, &dmacookie, &cookie_cnt);
2551 	} else {
2552 		status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_dmahdl,
2553 		    va_attrs->va_as, (caddr_t)(uintptr_t)va_attrs->va_vaddr,
2554 		    va_attrs->va_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2555 		    callback, NULL, &dmacookie, &cookie_cnt);
2556 	}
2557 	if (status != DDI_DMA_MAPPED) {
2558 		ibt_status = ibc_get_ci_failure(0);
2559 		goto marea_fail4;
2560 	}
2561 	i = 0;	/* count the number of pbl entries */
2562 	j = 0;	/* count the number of links to next HERMON_PAGE */
2563 	len = 0;
2564 	pagesize = PAGESIZE;
2565 	kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2566 	kcookie_paddr = kcookie.dmac_laddress + HERMON_PAGEMASK;
2567 	khdl = ma_hdl->h_ma_list_hdl;
2568 	while (cookie_cnt-- > 0) {
2569 		addr	= dmacookie.dmac_laddress;
2570 		len	+= dmacookie.dmac_size;
2571 		endaddr	= addr + (dmacookie.dmac_size - 1);
2572 		addr	= addr & ~(pagesize - 1);
2573 		while (addr <= endaddr) {
2574 			if (i >= list_len) {
2575 				ibt_status = IBT_PBL_TOO_SMALL;
2576 				goto marea_fail5;
2577 			}
2578 			/* Deal with last entry on page. */
2579 			if (!((uintptr_t)&kaddr[i+j+1] & HERMON_PAGEOFFSET)) {
2580 				if (kcookie.dmac_size > HERMON_PAGESIZE) {
2581 					kcookie_paddr += HERMON_PAGESIZE;
2582 					kcookie.dmac_size -= HERMON_PAGESIZE;
2583 				} else {
2584 					ddi_dma_nextcookie(khdl, &kcookie);
2585 					kcookie_paddr = kcookie.dmac_laddress;
2586 				}
2587 				kaddr[i+j] = htonll(kcookie_paddr);
2588 				j++;
2589 			}
2590 			kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2591 			i++;
2592 			addr += pagesize;
2593 			if (addr == 0) {
2594 				static int do_once = 1;
2595 				_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2596 				    do_once))
2597 				if (do_once) {
2598 					do_once = 0;
2599 					cmn_err(CE_NOTE, "probable error in "
2600 					    "dma_cookie address: map_mem_area");
2601 				}
2602 				break;
2603 			}
2604 		}
2605 		if (cookie_cnt != 0)
2606 			ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2607 	}
2608 
2609 	pmr = &reg_req->wr;
2610 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2611 	pmr->pmr_len = len;
2612 	pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2613 	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
2614 	pmr->pmr_num_buf = i;
2615 	pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;
2616 
2617 	*ma_hdl_p = ma_hdl;
2618 	return (IBT_SUCCESS);
2619 
2620 marea_fail5:
2621 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2622 	if (status != DDI_SUCCESS)
2623 		HERMON_WARNING(state, "failed to unbind DMA mapping");
2624 marea_fail4:
2625 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2626 	if (status != DDI_SUCCESS)
2627 		HERMON_WARNING(state, "failed to unbind DMA mapping");
2628 marea_fail3:
2629 	ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2630 marea_fail2:
2631 	ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2632 marea_fail1:
2633 	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2634 marea_fail0:
2635 	kmem_free(ma_hdl, sizeof (*ma_hdl));
2636 	*ma_hdl_p = NULL;
2637 	return (ibt_status);
2638 }
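
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * the PBL sizing rule from hermon_ci_map_mem_area() above as a
 * standalone computation.  Assuming, for illustration, a 4KB
 * HERMON_PAGESIZE and a 16-byte ibt_phys_addr_t, each page of the list
 * holds 256 entries with one burned as a link, so a list_len of 1000
 * yields 1000 + 1 + 3 = 1004 allocated entries.
 */
#if 0	/* example only */
static size_t
example_pbl_alloc_size(uint_t list_len)
{
	size_t	per_page = HERMON_PAGESIZE / sizeof (ibt_phys_addr_t);

	return ((list_len + 1 + list_len / per_page) *
	    sizeof (ibt_phys_addr_t));
}
#endif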
2639 
2640 /*
2641  * hermon_ci_unmap_mem_area()
2642  * Unmap the memory area
2643  *    Context: Can be called from interrupt or base context.
2644  */
2645 /* ARGSUSED */
2646 static ibt_status_t
2647 hermon_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
2648 {
2649 	int			status;
2650 	hermon_state_t		*state;
2651 
2652 	if (ma_hdl == NULL) {
2653 		return (IBT_MA_HDL_INVALID);
2654 	}
2655 	state = (hermon_state_t *)hca;
2656 	if (ma_hdl->h_ma_list_hdl != NULL) {
2657 		status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2658 		if (status != DDI_SUCCESS)
2659 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2660 		ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2661 		ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2662 	} else {
2663 		kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2664 	}
2665 	status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2666 	if (status != DDI_SUCCESS)
2667 		HERMON_WARNING(state, "failed to unbind DMA mapping");
2668 	ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2669 	kmem_free(ma_hdl, sizeof (*ma_hdl));
2670 	return (IBT_SUCCESS);
2671 }
2672 
2673 struct ibc_mi_s {
2674 	int			imh_len;	/* number of handles */
2675 	ddi_dma_handle_t	imh_dmahandle[1];	/* variable-length */
2676 };
2677 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2678     ibc_mi_s::imh_len
2679     ibc_mi_s::imh_dmahandle))
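
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * ibc_mi_s uses the classic C89 "struct hack"; imh_dmahandle[1] is
 * really a variable-length tail, so an object holding n handles is
 * allocated with (n - 1) extra array slots, matching the arithmetic in
 * hermon_ci_map_mem_iov() and hermon_ci_unmap_mem_iov() below.
 */
#if 0	/* example only */
static ibc_mi_hdl_t
example_alloc_mi_hdl(int n, int kmflag)
{
	ibc_mi_hdl_t	mi_hdl;

	mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
	    (n - 1) * sizeof (ddi_dma_handle_t), kmflag);
	if (mi_hdl != NULL)
		mi_hdl->imh_len = n;	/* remembered for the free side */
	return (mi_hdl);
}
#endif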
2680 
2681 
2682 /*
2683  * hermon_ci_map_mem_iov()
2684  * Map the memory
2685  *    Context: Can be called from interrupt or base context.
2686  */
2687 /* ARGSUSED */
2688 static ibt_status_t
2689 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2690     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2691 {
2692 	int			status;
2693 	int			i, j, nds, max_nds;
2694 	uint_t			len;
2695 	ibt_status_t		ibt_status;
2696 	ddi_dma_handle_t	dmahdl;
2697 	ddi_dma_cookie_t	dmacookie;
2698 	ddi_dma_attr_t		dma_attr;
2699 	uint_t			cookie_cnt;
2700 	ibc_mi_hdl_t		mi_hdl;
2701 	ibt_lkey_t		rsvd_lkey;
2702 	ibt_wr_ds_t		*sgl;
2703 	hermon_state_t		*state;
2704 	int			kmflag;
2705 	int			(*callback)(caddr_t);
2706 
2707 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
2708 
2709 	if (mi_hdl_p == NULL)
2710 		return (IBT_MI_HDL_INVALID);
2711 
2712 	/* Check for valid HCA handle */
2713 	if (hca == NULL)
2714 		return (IBT_HCA_HDL_INVALID);
2715 
2716 	state = (hermon_state_t *)hca;
2717 	hermon_dma_attr_init(state, &dma_attr);
2718 #ifdef	__sparc
2719 	if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2720 		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2721 
2722 	if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2723 		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2724 #endif
2725 
2726 	nds = 0;
2727 	max_nds = iov_attr->iov_wr_nds;
2728 	if (iov_attr->iov_lso_hdr_sz)
2729 		max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2730 		    0xf) >> 4;	/* 0xf is for rounding up to a multiple of 16 */
2731 	rsvd_lkey = state->hs_devlim.rsv_lkey;
2732 	if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2733 		kmflag = KM_SLEEP;
2734 		callback = DDI_DMA_SLEEP;
2735 	} else {
2736 		kmflag = KM_NOSLEEP;
2737 		callback = DDI_DMA_DONTWAIT;
2738 	}
2739 
2740 	if (iov_attr->iov_flags & IBT_IOV_BUF) {
2741 		mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2742 		if (mi_hdl == NULL)
2743 			return (IBT_INSUFF_RESOURCE);
2744 		sgl = wr->send.wr_sgl;
2745 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2746 
2747 		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2748 		    callback, NULL, &dmahdl);
2749 		if (status != DDI_SUCCESS) {
2750 			kmem_free(mi_hdl, sizeof (*mi_hdl));
2751 			return (IBT_INSUFF_RESOURCE);
2752 		}
2753 		status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2754 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2755 		    &dmacookie, &cookie_cnt);
2756 		if (status != DDI_DMA_MAPPED) {
2757 			ddi_dma_free_handle(&dmahdl);
2758 			kmem_free(mi_hdl, sizeof (*mi_hdl));
2759 			return (ibc_get_ci_failure(0));
2760 		}
2761 		while (cookie_cnt-- > 0) {
2762 			if (nds >= max_nds) {
2763 				status = ddi_dma_unbind_handle(dmahdl);
2764 				if (status != DDI_SUCCESS)
2765 					HERMON_WARNING(state, "failed to "
2766 					    "unbind DMA mapping");
2767 				ddi_dma_free_handle(&dmahdl);
2768 				return (IBT_SGL_TOO_SMALL);
2769 			}
2770 			sgl[nds].ds_va = dmacookie.dmac_laddress;
2771 			sgl[nds].ds_key = rsvd_lkey;
2772 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2773 			nds++;
2774 			if (cookie_cnt != 0)
2775 				ddi_dma_nextcookie(dmahdl, &dmacookie);
2776 		}
2777 		wr->send.wr_nds = nds;
2778 		mi_hdl->imh_len = 1;
2779 		mi_hdl->imh_dmahandle[0] = dmahdl;
2780 		*mi_hdl_p = mi_hdl;
2781 		return (IBT_SUCCESS);
2782 	}
2783 
2784 	if (iov_attr->iov_flags & IBT_IOV_RECV)
2785 		sgl = wr->recv.wr_sgl;
2786 	else
2787 		sgl = wr->send.wr_sgl;
2788 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2789 
2790 	len = iov_attr->iov_list_len;
2791 	for (i = 0, j = 0; j < len; j++) {
2792 		if (iov_attr->iov[j].iov_len == 0)
2793 			continue;
2794 		i++;
2795 	}
2796 	mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2797 	    (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2798 	if (mi_hdl == NULL)
2799 		return (IBT_INSUFF_RESOURCE);
2800 	mi_hdl->imh_len = i;
2801 	for (i = 0, j = 0; j < len; j++) {
2802 		if (iov_attr->iov[j].iov_len == 0)
2803 			continue;
2804 		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2805 		    callback, NULL, &dmahdl);
2806 		if (status != DDI_SUCCESS) {
2807 			ibt_status = IBT_INSUFF_RESOURCE;
2808 			goto fail2;
2809 		}
2810 		status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
2811 		    iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
2812 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2813 		    &dmacookie, &cookie_cnt);
2814 		if (status != DDI_DMA_MAPPED) {
2815 			ibt_status = ibc_get_ci_failure(0);
2816 			goto fail1;
2817 		}
2818 		mi_hdl->imh_dmahandle[i] = dmahdl;	/* fail2 unwinds it */
2819 		i++;
2820 		if (nds + cookie_cnt > max_nds) {
2821 			ibt_status = IBT_SGL_TOO_SMALL;
2822 			goto fail2;
2823 		}
2824 		while (cookie_cnt-- > 0) {
2825 			sgl[nds].ds_va = dmacookie.dmac_laddress;
2826 			sgl[nds].ds_key = rsvd_lkey;
2827 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2828 			nds++;
2829 			if (cookie_cnt != 0)
2830 				ddi_dma_nextcookie(dmahdl, &dmacookie);
2831 		}
2832 	}
2833 
2834 	if (iov_attr->iov_flags & IBT_IOV_RECV)
2835 		wr->recv.wr_nds = nds;
2836 	else
2837 		wr->send.wr_nds = nds;
2838 	*mi_hdl_p = mi_hdl;
2839 	return (IBT_SUCCESS);
2840 
2841 fail1:
2842 	ddi_dma_free_handle(&dmahdl);
2843 fail2:
2844 	while (--i >= 0) {
2845 		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2846 		if (status != DDI_SUCCESS)
2847 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2848 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2849 	}
2850 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
2851 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2852 	*mi_hdl_p = NULL;
2853 	return (ibt_status);
2854 }
2855 
2856 /*
2857  * hermon_ci_unmap_mem_iov()
2858  * Unmap the memory
2859  *    Context: Can be called from interrupt or base context.
2860  */
2861 /* ARGSUSED */
2862 static ibt_status_t
2863 hermon_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
2864 {
2865 	int		status, i;
2866 	hermon_state_t	*state;
2867 
2868 	/* Check for valid HCA handle */
2869 	if (hca == NULL)
2870 		return (IBT_HCA_HDL_INVALID);
2871 
2872 	state = (hermon_state_t *)hca;
2873 
2874 	if (mi_hdl == NULL)
2875 		return (IBT_MI_HDL_INVALID);
2876 
2877 	for (i = 0; i < mi_hdl->imh_len; i++) {
2878 		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2879 		if (status != DDI_SUCCESS)
2880 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2881 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2882 	}
2883 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
2884 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2885 	return (IBT_SUCCESS);
2886 }
2887 
2888 /* Allocate L_Key */
2889 /*
2890  * hermon_ci_alloc_lkey()
2891  */
2892 /* ARGSUSED */
2893 static ibt_status_t
2894 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2895     ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
2896     ibt_pmr_desc_t *mem_desc_p)
2897 {
2898 	return (IBT_NOT_SUPPORTED);
2899 }
2900 
2901 /* Physical Register Memory Region */
2902 /*
2903  * hermon_ci_register_physical_mr()
2904  */
2905 /* ARGSUSED */
2906 static ibt_status_t
2907 hermon_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2908     ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2909     ibt_pmr_desc_t *mem_desc_p)
2910 {
2911 	return (IBT_NOT_SUPPORTED);
2912 }
2913 
2914 /*
2915  * hermon_ci_reregister_physical_mr()
2916  */
2917 /* ARGSUSED */
2918 static ibt_status_t
2919 hermon_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
2920     ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
2921     ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
2922 {
2923 	return (IBT_NOT_SUPPORTED);
2924 }
2925 
2926 /* Mellanox FMR Support */
2927 /*
2928  * hermon_ci_create_fmr_pool()
2929  * Creates a pool of memory regions suitable for FMR registration
2930  *    Context: Can be called from base context only
2931  */
2932 static ibt_status_t
2933 hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2934     ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
2935 {
2936 	hermon_state_t	*state;
2937 	hermon_pdhdl_t	pdhdl;
2938 	hermon_fmrhdl_t	fmrpoolhdl;
2939 	int		status;
2940 
2941 	/* Check for valid HCA handle */
2942 	if (hca == NULL) {
2943 		return (IBT_HCA_HDL_INVALID);
2944 	}
2945 
2946 	state = (hermon_state_t *)hca;
2947 
2948 	/* Check for valid PD handle pointer */
2949 	if (pd == NULL) {
2950 		return (IBT_PD_HDL_INVALID);
2951 	}
2952 
2953 	pdhdl = (hermon_pdhdl_t)pd;
2954 
2955 	/*
2956 	 * Validate the access flags.  Both Remote Write and Remote Atomic
2957 	 * require the Local Write flag to be set
2958 	 */
2959 	if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2960 	    (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2961 	    !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2962 		return (IBT_MR_ACCESS_REQ_INVALID);
2963 	}
2964 
2965 	status = hermon_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl);
2966 	if (status != DDI_SUCCESS) {
2967 		return (status);
2968 	}
2969 
2970 	/* Set fmr_pool from hermon handle */
2971 	*fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl;
2972 
2973 	return (IBT_SUCCESS);
2974 }
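
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * the access-rights rule enforced above (Remote Write or Remote Atomic
 * require Local Write), restated as a reusable predicate.  The flag
 * names appear in the validation code above.
 */
#if 0	/* example only */
static boolean_t
example_mr_flags_valid(ibt_mr_flags_t flags)
{
	if ((flags & (IBT_MR_ENABLE_REMOTE_WRITE |
	    IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(flags & IBT_MR_ENABLE_LOCAL_WRITE))
		return (B_FALSE);
	return (B_TRUE);
}
#endif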
2975 
2976 /*
2977  * hermon_ci_destroy_fmr_pool()
2978  * Free all resources associated with an FMR pool.
2979  *    Context: Can be called from base context only.
2980  */
2981 static ibt_status_t
2982 hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2983 {
2984 	hermon_state_t	*state;
2985 	hermon_fmrhdl_t	fmrpoolhdl;
2986 	int		status;
2987 
2988 	/* Check for valid HCA handle */
2989 	if (hca == NULL) {
2990 		return (IBT_HCA_HDL_INVALID);
2991 	}
2992 
2993 	state = (hermon_state_t *)hca;
2994 
2995 	/* Check for valid FMR Pool handle */
2996 	if (fmr_pool == NULL) {
2997 		return (IBT_FMR_POOL_HDL_INVALID);
2998 	}
2999 
3000 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
3001 
3002 	status = hermon_destroy_fmr_pool(state, fmrpoolhdl);
3003 	return (status);
3004 }
3005 
3006 /*
3007  * hermon_ci_flush_fmr_pool()
3008  * Force a flush of the memory tables, cleaning up used FMR resources.
3009  *    Context: Can be called from interrupt or base context.
3010  */
3011 static ibt_status_t
3012 hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
3013 {
3014 	hermon_state_t	*state;
3015 	hermon_fmrhdl_t	fmrpoolhdl;
3016 	int		status;
3017 
3018 	/* Check for valid HCA handle */
3019 	if (hca == NULL) {
3020 		return (IBT_HCA_HDL_INVALID);
3021 	}
3022 
3023 	state = (hermon_state_t *)hca;
3024 
3025 	/* Check for valid FMR Pool handle */
3026 	if (fmr_pool == NULL) {
3027 		return (IBT_FMR_POOL_HDL_INVALID);
3028 	}
3029 
3030 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
3031 
3032 	status = hermon_flush_fmr_pool(state, fmrpoolhdl);
3033 	return (status);
3034 }
3035 
3036 /*
3037  * hermon_ci_register_physical_fmr()
3038  * From the 'pool' of FMR regions passed in, performs register physical
3039  * operation.
3040  *    Context: Can be called from interrupt or base context.
3041  */
3042 /* ARGSUSED */
3043 static ibt_status_t
3044 hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
3045     ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
3046     void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
3047 {
3048 	hermon_state_t		*state;
3049 	hermon_mrhdl_t		mrhdl;
3050 	hermon_fmrhdl_t		fmrpoolhdl;
3051 	int			status;
3052 
3053 	ASSERT(mem_pattr != NULL);
3054 	ASSERT(mr_p != NULL);
3055 	ASSERT(mem_desc_p != NULL);
3056 
3057 	/* Check for valid HCA handle */
3058 	if (hca == NULL) {
3059 		return (IBT_HCA_HDL_INVALID);
3060 	}
3061 
3062 	/* Grab the Hermon softstate pointer */
3063 	state = (hermon_state_t *)hca;
3064 
3065 	/* Check for valid FMR Pool handle */
3066 	if (fmr_pool == NULL) {
3067 		return (IBT_FMR_POOL_HDL_INVALID);
3068 	}
3069 
3070 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
3071 
3072 	status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
3073 	    &mrhdl, mem_desc_p);
3074 	if (status != DDI_SUCCESS) {
3075 		return (status);
3076 	}
3077 
3078 	/*
3079 	 * If the region is mapped for streaming (i.e. noncoherent), then
3080 	 * flag that a sync is required
3081 	 */
3082 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
3083 	mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
3084 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
3085 	if (mem_desc_p->pmd_sync_required == B_TRUE) {
3086 		/* Fill in DMA handle for future sync operations */
3087 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo))
3088 		mrhdl->mr_bindinfo.bi_dmahdl =
3089 		    (ddi_dma_handle_t)mem_pattr->pmr_ma;
3090 	}
3091 
3092 	/* Return the Hermon MR handle */
3093 	*mr_p = (ibc_mr_hdl_t)mrhdl;
3094 
3095 	return (IBT_SUCCESS);
3096 }
3097 
3098 /*
3099  * hermon_ci_deregister_fmr()
3100  * Moves an FMR (specified by 'mr') to the deregistered state.
3101  *    Context: Can be called from base context only.
3102  */
3103 static ibt_status_t
3104 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
3105 {
3106 	hermon_state_t		*state;
3107 	hermon_mrhdl_t		mrhdl;
3108 	int			status;
3109 
3110 	/* Check for valid HCA handle */
3111 	if (hca == NULL) {
3112 		return (IBT_HCA_HDL_INVALID);
3113 	}
3114 
3115 	/* Check for valid memory region handle */
3116 	if (mr == NULL) {
3117 		return (IBT_MR_HDL_INVALID);
3118 	}
3119 
3120 	/* Grab the Hermon softstate pointer */
3121 	state = (hermon_state_t *)hca;
3122 	mrhdl = (hermon_mrhdl_t)mr;
3123 
3124 	/*
3125 	 * Deregister the memory region, either "unmap" the FMR or deregister
3126 	 * the normal memory region.
3127 	 */
3128 	status = hermon_deregister_fmr(state, mrhdl);
3129 	return (status);
3130 }
3131 
3132 static int
3133 hermon_mem_alloc(hermon_state_t *state, size_t size, ibt_mr_flags_t flags,
3134     caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_hdl)
3135 {
3136 	ddi_dma_handle_t	dma_hdl;
3137 	ddi_dma_attr_t		dma_attr;
3138 	ddi_acc_handle_t	acc_hdl;
3139 	size_t			real_len;
3140 	int			status;
3141 	int			(*ddi_cb)(caddr_t);
3142 	ibc_mem_alloc_hdl_t	mem_alloc_hdl;
3143 
3144 	hermon_dma_attr_init(state, &dma_attr);
3145 
3146 	ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3147 
3148 	/* Allocate a DMA handle */
3149 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, ddi_cb,
3150 	    NULL, &dma_hdl);
3151 	if (status != DDI_SUCCESS) {
3152 		return (DDI_FAILURE);
3153 	}
3154 
3155 	/* Allocate DMA memory */
3156 	status = ddi_dma_mem_alloc(dma_hdl, size,
3157 	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
3158 	    NULL, kaddrp, &real_len, &acc_hdl);
3159 	if (status != DDI_SUCCESS) {
3160 		ddi_dma_free_handle(&dma_hdl);
3161 		return (DDI_FAILURE);
3162 	}
3163 
3164 	/* Package the hermon_dma_info contents and return */
3165 	mem_alloc_hdl = kmem_alloc(sizeof (**mem_hdl),
3166 	    (flags & IBT_MR_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
3167 	if (mem_alloc_hdl == NULL) {
3168 		ddi_dma_mem_free(&acc_hdl);
3169 		ddi_dma_free_handle(&dma_hdl);
3170 		return (DDI_FAILURE);
3171 	}
3172 	mem_alloc_hdl->ibc_dma_hdl = dma_hdl;
3173 	mem_alloc_hdl->ibc_acc_hdl = acc_hdl;
3174 
3175 	*mem_hdl = mem_alloc_hdl;
3176 
3177 	return (DDI_SUCCESS);
3178 }
3179 
3180 /*
3181  * hermon_ci_alloc_io_mem()
3182  *	Allocate dma-able memory
3183  *
3184  */
3185 static ibt_status_t
3186 hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size, ibt_mr_flags_t mr_flag,
3187     caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_alloc_hdl_p)
3188 {
3189 	hermon_state_t	*state;
3190 	int		status;
3191 
3192 	/* Check for valid HCA handle */
3193 	if (hca == NULL) {
3194 		return (IBT_HCA_HDL_INVALID);
3195 	}
3196 
3197 	/* Check for valid mem_alloc_hdl_p handle pointer */
3198 	if (mem_alloc_hdl_p == NULL) {
3199 		return (IBT_MEM_ALLOC_HDL_INVALID);
3200 	}
3201 
3202 	/* Grab the Hermon softstate pointer and mem handle */
3203 	state = (hermon_state_t *)hca;
3204 
3205 	/* Allocate the memory and handles */
3206 	status = hermon_mem_alloc(state, size, mr_flag, kaddrp,
3207 	    mem_alloc_hdl_p);
3208 
3209 	if (status != DDI_SUCCESS) {
3210 		*mem_alloc_hdl_p = NULL;
3211 		*kaddrp = NULL;
3212 		return (status);
3213 	}
3214 
3215 	return (IBT_SUCCESS);
3216 }
3217 
3218 
3219 /*
3220  * hermon_ci_free_io_mem()
3221  * Unbind handl and free the memory
3222  */
3223 static ibt_status_t
3224 hermon_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
3225 {
3226 	/* Check for valid HCA handle */
3227 	if (hca == NULL) {
3228 		return (IBT_HCA_HDL_INVALID);
3229 	}
3230 
3231 	/* Check for valid mem_alloc_hdl handle pointer */
3232 	if (mem_alloc_hdl == NULL) {
3233 		return (IBT_MEM_ALLOC_HDL_INVALID);
3234 	}
3235 
3236 	/* Unbind the handles and free the memory */
3237 	(void) ddi_dma_unbind_handle(mem_alloc_hdl->ibc_dma_hdl);
3238 	ddi_dma_mem_free(&mem_alloc_hdl->ibc_acc_hdl);
3239 	ddi_dma_free_handle(&mem_alloc_hdl->ibc_dma_hdl);
3240 	kmem_free(mem_alloc_hdl, sizeof (*mem_alloc_hdl));
3241 
3242 	return (IBT_SUCCESS);
3243 }
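
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * the intended pairing of the two entry points above.  Note that
 * hermon_ci_alloc_io_mem() allocates the DMA handle and memory but does
 * not bind them; the bind is expected to happen when the client
 * registers the memory, which is why hermon_ci_free_io_mem() unbinds
 * before freeing.  IBT_MR_SLEEP is assumed from the IBTF headers.
 */
#if 0	/* example only */
static void
example_io_mem_lifecycle(ibc_hca_hdl_t hca)
{
	ibc_mem_alloc_hdl_t	hdl;
	caddr_t			kaddr;

	if (hermon_ci_alloc_io_mem(hca, 8192, IBT_MR_SLEEP, &kaddr,
	    &hdl) != IBT_SUCCESS)
		return;
	/* ... register the memory (which binds it) and use kaddr ... */
	(void) hermon_ci_free_io_mem(hca, hdl);
}
#endif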
3244