/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <sys/rds.h>
#include <netinet/in.h>

#include <sys/ib/clients/rdsv3/rdsv3.h>
#include <sys/ib/clients/rdsv3/rdma.h>
#include <sys/ib/clients/rdsv3/ib.h>
#include <sys/ib/clients/rdsv3/rdsv3_debug.h>

/*
 * This is stored as mr->r_trans_private.
 */
struct rdsv3_ib_mr {
	struct rdsv3_ib_device	*device;
	struct rdsv3_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct list		list;
	unsigned int		remap_count;

	struct rdsv3_scatterlist	*sg;
	unsigned int		sg_len;
	uint64_t		*dma;
	int			sg_dma_len;

	/* DDI pinned memory */
	ddi_umem_cookie_t	umem_cookie;
	/* IBTF type definitions */
	ibt_fmr_pool_hdl_t	fmr_pool_hdl;
	ibt_ma_hdl_t		rc_ma_hdl;
	ibt_mr_hdl_t		rc_fmr_hdl;
	ibt_pmr_desc_t		rc_mem_desc;
};
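
/*
 * Illustrative sketch (not part of the driver): since this structure is
 * stored as mr->r_trans_private, transport code recovers it with a plain
 * cast, as rdsv3_ib_free_mr() below does with its trans_private argument.
 * Assumes the generic struct rdsv3_mr exposes r_trans_private.
 *
 *	static uint32_t
 *	example_mr_rkey(struct rdsv3_mr *mr)
 *	{
 *		struct rdsv3_ib_mr *ibmr = mr->r_trans_private;
 *
 *		return ((uint32_t)ibmr->rc_mem_desc.pmd_rkey);
 *	}
 */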

/*
 * Our own little FMR pool
 */
struct rdsv3_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct rdsv3_work_s	flush_worker;	/* flush worker */

	kmutex_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */
	/* MRs that have reached their max_maps limit */
	struct list		drop_list;
	struct list		free_list;	/* unused MRs */
	struct list		clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
};
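
/*
 * Illustrative sketch (not part of the driver): how the counters above
 * would typically gate MR allocation.  example_pool_can_alloc() is a
 * hypothetical name, and illumos atomic_inc_32_nv()/atomic_dec_32() are
 * assumed to back the atomic_t counters.
 *
 *	static int
 *	example_pool_can_alloc(struct rdsv3_ib_mr_pool *pool)
 *	{
 *		if (atomic_inc_32_nv(&pool->item_count) > pool->max_items) {
 *			atomic_dec_32(&pool->item_count);
 *			return (0);
 *		}
 *		return (1);
 *	}
 */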

static int rdsv3_ib_flush_mr_pool(struct rdsv3_ib_device *rds_ibdev,
	ibt_fmr_pool_hdl_t pool_hdl, int free_all);
static void rdsv3_ib_teardown_mr(struct rdsv3_ib_mr *ibmr);
static void rdsv3_ib_mr_pool_flush_worker(struct rdsv3_work_s *work);
static struct rdsv3_ib_mr *rdsv3_ib_alloc_fmr(struct rdsv3_ib_device
	*rds_ibdev);
static int rdsv3_ib_map_fmr(struct rdsv3_ib_device *rds_ibdev,
	struct rdsv3_ib_mr *ibmr, struct buf *bp, unsigned int nents);

static struct rdsv3_ib_device *
rdsv3_ib_get_device(uint32_be_t ipaddr)
{
	struct rdsv3_ib_device *rds_ibdev;
	struct rdsv3_ib_ipaddr *i_ipaddr;

	RDSV3_DPRINTF4("rdsv3_ib_get_device", "Enter: ipaddr: 0x%x", ipaddr);

	RDSV3_FOR_EACH_LIST_NODE(rds_ibdev, &rdsv3_ib_devices, list) {
		mutex_enter(&rds_ibdev->spinlock);
		RDSV3_FOR_EACH_LIST_NODE(i_ipaddr, &rds_ibdev->ipaddr_list,
		    list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				mutex_exit(&rds_ibdev->spinlock);
				return (rds_ibdev);
			}
		}
		mutex_exit(&rds_ibdev->spinlock);
	}

	RDSV3_DPRINTF4("rdsv3_ib_get_device", "Return: ipaddr: 0x%x", ipaddr);

	return (NULL);
}

static int
rdsv3_ib_add_ipaddr(struct rdsv3_ib_device *rds_ibdev, uint32_be_t ipaddr)
{
	struct rdsv3_ib_ipaddr *i_ipaddr;

	RDSV3_DPRINTF4("rdsv3_ib_add_ipaddr", "rds_ibdev: %p ipaddr: %x",
	    rds_ibdev, ipaddr);

	i_ipaddr = kmem_alloc(sizeof (*i_ipaddr), KM_NOSLEEP);
	if (!i_ipaddr)
		return (-ENOMEM);

	i_ipaddr->ipaddr = ipaddr;

	mutex_enter(&rds_ibdev->spinlock);
	list_insert_tail(&rds_ibdev->ipaddr_list, i_ipaddr);
	mutex_exit(&rds_ibdev->spinlock);

	return (0);
}

static void
rdsv3_ib_remove_ipaddr(struct rdsv3_ib_device *rds_ibdev, uint32_be_t ipaddr)
{
	struct rdsv3_ib_ipaddr *i_ipaddr, *next;

	RDSV3_DPRINTF4("rdsv3_ib_remove_ipaddr", "rds_ibdev: %p, ipaddr: %x",
	    rds_ibdev, ipaddr);

	mutex_enter(&rds_ibdev->spinlock);
	RDSV3_FOR_EACH_LIST_NODE_SAFE(i_ipaddr, next, &rds_ibdev->ipaddr_list,
	    list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_remove_node(&i_ipaddr->list);
			kmem_free(i_ipaddr, sizeof (*i_ipaddr));
			break;
		}
	}
	mutex_exit(&rds_ibdev->spinlock);

	RDSV3_DPRINTF4("rdsv3_ib_remove_ipaddr",
	    "Return: rds_ibdev: %p, ipaddr: %x", rds_ibdev, ipaddr);
}

int
rdsv3_ib_update_ipaddr(struct rdsv3_ib_device *rds_ibdev, uint32_be_t ipaddr)
{
	struct rdsv3_ib_device *rds_ibdev_old;

	RDSV3_DPRINTF4("rdsv3_ib_update_ipaddr", "rds_ibdev: %p, ipaddr: %x",
	    rds_ibdev, ipaddr);

	rds_ibdev_old = rdsv3_ib_get_device(ipaddr);
	if (rds_ibdev_old)
		rdsv3_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

	return (rdsv3_ib_add_ipaddr(rds_ibdev, ipaddr));
}

void
rdsv3_ib_add_conn(struct rdsv3_ib_device *rds_ibdev,
    struct rdsv3_connection *conn)
{
	struct rdsv3_ib_connection *ic = conn->c_transport_data;

	RDSV3_DPRINTF4("rdsv3_ib_add_conn", "rds_ibdev: %p, conn: %p",
	    rds_ibdev, conn);

	/* conn was previously on the nodev_conns_list */
	mutex_enter(&ib_nodev_conns_lock);
	ASSERT(!list_is_empty(&ib_nodev_conns));
	ASSERT(list_link_active(&ic->ib_node));
	list_remove_node(&ic->ib_node);

	mutex_enter(&rds_ibdev->spinlock);
	list_insert_tail(&rds_ibdev->conn_list, ic);
	mutex_exit(&rds_ibdev->spinlock);
	mutex_exit(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
}

void
rdsv3_ib_remove_conn(struct rdsv3_ib_device *rds_ibdev,
    struct rdsv3_connection *conn)
{
	struct rdsv3_ib_connection *ic = conn->c_transport_data;

	RDSV3_DPRINTF4("rdsv3_ib_remove_conn", "rds_ibdev: %p, conn: %p",
	    rds_ibdev, conn);

	/* place conn on nodev_conns_list */
	mutex_enter(&ib_nodev_conns_lock);

	mutex_enter(&rds_ibdev->spinlock);
	ASSERT(list_link_active(&ic->ib_node));
	list_remove_node(&ic->ib_node);
	mutex_exit(&rds_ibdev->spinlock);

	list_insert_tail(&ib_nodev_conns, ic);

	mutex_exit(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;

	RDSV3_DPRINTF4("rdsv3_ib_remove_conn",
	    "Return: rds_ibdev: %p, conn: %p", rds_ibdev, conn);
}

void
__rdsv3_ib_destroy_conns(struct list *list, kmutex_t *list_lock)
{
	struct rdsv3_ib_connection *ic, *_ic;
	list_t tmp_list;

	RDSV3_DPRINTF4("__rdsv3_ib_destroy_conns", "Enter: list: %p", list);

	/* avoid calling conn_destroy with irqs off */
	mutex_enter(list_lock);
	list_splice(list, &tmp_list);
	mutex_exit(list_lock);

	RDSV3_FOR_EACH_LIST_NODE_SAFE(ic, _ic, &tmp_list, ib_node) {
		rdsv3_conn_destroy(ic->conn);
	}

	RDSV3_DPRINTF4("__rdsv3_ib_destroy_conns", "Return: list: %p", list);
}

void
rdsv3_ib_destroy_mr_pool(struct rdsv3_ib_device *rds_ibdev)
{
	RDSV3_DPRINTF4("rdsv3_ib_destroy_mr_pool", "Enter: ibdev: %p",
	    rds_ibdev);

	if (rds_ibdev->fmr_pool_hdl == NULL)
		return;

	(void) rdsv3_ib_flush_mr_pool(rds_ibdev, rds_ibdev->fmr_pool_hdl, 1);
	(void) ibt_destroy_fmr_pool(ib_get_ibt_hca_hdl(rds_ibdev->dev),
	    rds_ibdev->fmr_pool_hdl);
}

#define	IB_FMR_MAX_BUF_SIZE	0x1000000	/* 16MB max buf */
int
rdsv3_ib_create_mr_pool(struct rdsv3_ib_device *rds_ibdev)
{
	uint_t h_page_sz;
	ibt_fmr_pool_attr_t fmr_attr;
	ibt_status_t ibt_status;
	ibt_hca_hdl_t hca_hdl;

	RDSV3_DPRINTF4("rdsv3_ib_create_mr_pool",
	    "Enter: ibdev: %p", rds_ibdev);

	hca_hdl = ib_get_ibt_hca_hdl(rds_ibdev->dev);
	/* get hca attributes */
	ibt_status = ibt_query_hca(hca_hdl, &rds_ibdev->hca_attr);
	if (ibt_status != IBT_SUCCESS) {
		return (-ENOMEM);
	}

	/* setup FMR pool attributes */
	h_page_sz = rds_ibdev->hca_attr.hca_page_sz * 1024;

	fmr_attr.fmr_max_pages_per_fmr = (IB_FMR_MAX_BUF_SIZE / h_page_sz) + 2;
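	/*
	 * Worked example: hca_page_sz is reported in KB, so with 4K HCA
	 * pages h_page_sz = 4 * 1024 = 4096 bytes, and a 16MB buffer
	 * spans 0x1000000 / 4096 = 4096 pages; the + 2 allows for a
	 * buffer that begins and ends on partial pages.
	 */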
	fmr_attr.fmr_pool_size = RDSV3_FMR_POOL_SIZE;
	fmr_attr.fmr_dirty_watermark = 128;
	fmr_attr.fmr_cache = B_FALSE;
	fmr_attr.fmr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE |
	    IBT_MR_ENABLE_REMOTE_WRITE | IBT_MR_ENABLE_REMOTE_READ;
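	/*
	 * The remote read/write enables let the rkey returned to
	 * rdsv3_ib_get_mr() callers be used by the peer for RDMA;
	 * IBT_MR_NOSLEEP keeps FMR mapping usable in contexts that
	 * must not block for memory.
	 */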
	fmr_attr.fmr_page_sz = h_page_sz;
	fmr_attr.fmr_func_hdlr = NULL;
	fmr_attr.fmr_func_arg = (void *) NULL;

	/* create the FMR pool */
	ibt_status = ibt_create_fmr_pool(hca_hdl, rds_ibdev->pd->ibt_pd,
	    &fmr_attr, &rds_ibdev->fmr_pool_hdl);
	if (ibt_status != IBT_SUCCESS) {
		return (-ENOMEM);
	}
	rds_ibdev->max_fmrs = fmr_attr.fmr_pool_size;
	rds_ibdev->fmr_message_size = fmr_attr.fmr_max_pages_per_fmr;
	return (0);
}

void
rdsv3_ib_get_mr_info(struct rdsv3_ib_device *rds_ibdev,
	struct rdsv3_info_rdma_connection *iinfo)
{
	iinfo->rdma_mr_max = rds_ibdev->max_fmrs;
	iinfo->rdma_mr_size = rds_ibdev->fmr_message_size;
}

static void
rdsv3_umem_cb(ddi_umem_cookie_t *umem_cookie)
{
	/* LINTED E_FUNC_SET_NOT_USED */
	ddi_umem_cookie_t *cp = umem_cookie;
	RDSV3_DPRINTF5("rdsv3_umem_cb", "Enter: umem_cookie %p", umem_cookie);
	/* all umem_cookies are freed at socket fd close */
	/* there should be no umem_cookies when clearing the addr space */
}

struct umem_callback_ops rdsv3_umem_cbops = {
	UMEM_CALLBACK_VERSION,
	rdsv3_umem_cb,
};

void *
rdsv3_ib_get_mr(struct rdsv3_iovec *args, unsigned long nents,
	struct rdsv3_sock *rs, uint32_t *key_ret)
{
	struct rdsv3_ib_device *rds_ibdev;
	struct rdsv3_ib_mr *ibmr = NULL;
	ddi_umem_cookie_t umem_cookie;
	size_t umem_len;
	caddr_t umem_addr;
	int umem_flags;
	int ret;
	struct buf *bp;

	RDSV3_DPRINTF4("rdsv3_ib_get_mr", "Enter: args.addr: %p", args->addr);

	rds_ibdev = rdsv3_ib_get_device(rs->rs_bound_addr);

	if (rds_ibdev == NULL)
		return (ERR_PTR(-EFAULT));

	ibmr = rdsv3_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return (ibmr);

	/* pin user memory pages */
	umem_len   = ptob(btopr(args->bytes +
	    ((uintptr_t)args->addr & PAGEOFFSET)));
	umem_addr  = (caddr_t)((uintptr_t)args->addr & ~PAGEOFFSET);
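	/*
	 * Worked example: for args->addr = 0x10010 and args->bytes =
	 * 0x2000 on 4K pages, the 0x10 page offset is added before
	 * rounding, so umem_len = ptob(btopr(0x2010)) = 0x3000 and
	 * umem_addr = 0x10000: three whole pages covering the
	 * unaligned request.
	 */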
	umem_flags = (DDI_UMEMLOCK_WRITE | DDI_UMEMLOCK_READ |
	    DDI_UMEMLOCK_LONGTERM);
	ret = umem_lockmemory(umem_addr, umem_len, umem_flags,
	    &umem_cookie, &rdsv3_umem_cbops, NULL);
	if (ret != 0) {
		kmem_free((void *) ibmr, sizeof (*ibmr));
		ibmr = ERR_PTR(ret);
		return (ibmr);
	}

	/* wrap the umem_cookie in a buf(9S) for rdsv3_ib_map_fmr() */
	bp = ddi_umem_iosetup(umem_cookie, 0, umem_len,
	    B_WRITE, 0, 0, NULL, DDI_UMEM_SLEEP);

	ret = rdsv3_ib_map_fmr(rds_ibdev, ibmr, bp, nents);
	freerbuf(bp);	/* free bp */
	if (ret == 0) {
		ibmr->umem_cookie = umem_cookie;
		*key_ret = (uint32_t)ibmr->rc_mem_desc.pmd_rkey;
		ibmr->device = rds_ibdev;
		RDSV3_DPRINTF4("rdsv3_ib_get_mr",
		    "Return: ibmr: %p umem_cookie %p", ibmr, ibmr->umem_cookie);
		return (ibmr);
	} else { /* error return */
		RDSV3_DPRINTF2("rdsv3_ib_get_mr", "map_fmr failed (errno=%d)\n",
		    ret);
		ddi_umem_unlock(umem_cookie);
		kmem_free((void *)ibmr, sizeof (*ibmr));
		return (ERR_PTR(ret));
	}
}
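
/*
 * Illustrative sketch (not part of the driver): consuming the
 * pointer-or-errno convention used above.  The mr and rkey locals are
 * hypothetical; on success the caller keeps mr as the MR's
 * trans_private and hands rkey to the peer.
 *
 *	void *mr = rdsv3_ib_get_mr(args, nents, rs, &rkey);
 *
 *	if (IS_ERR(mr))
 *		return (PTR_ERR(mr));
 */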

static struct rdsv3_ib_mr *
rdsv3_ib_alloc_fmr(struct rdsv3_ib_device *rds_ibdev)
{
	struct rdsv3_ib_mr *ibmr;

	RDSV3_DPRINTF4("rdsv3_ib_alloc_fmr", "Enter: ibdev: %p", rds_ibdev);

	if (rds_ibdev->fmr_pool_hdl) {
		ibmr = (struct rdsv3_ib_mr *)kmem_zalloc(sizeof (*ibmr),
		    KM_SLEEP);
		ibmr->fmr_pool_hdl = rds_ibdev->fmr_pool_hdl;
		return (ibmr);
	}
	return ((struct rdsv3_ib_mr *)ERR_PTR(-ENOMEM));
}

static int
rdsv3_ib_map_fmr(struct rdsv3_ib_device *rds_ibdev, struct rdsv3_ib_mr *ibmr,
	struct buf *bp, unsigned int nents)
{
	ibt_va_attr_t va_attr;
	ibt_reg_req_t reg_req;
	uint_t paddr_list_len;
	uint_t page_sz;
	ibt_status_t ibt_status;
	/* LINTED E_FUNC_SET_NOT_USED */
	unsigned int l_nents = nents;

	RDSV3_DPRINTF4("rdsv3_ib_map_fmr", "Enter: ibmr: %p", ibmr);
	RDSV3_DPRINTF4("rdsv3_ib_map_fmr", "buf addr: %p", bp->b_un.b_addr);

	/* setup ibt_map_mem_area attributes */
	bzero(&va_attr, sizeof (ibt_va_attr_t));
	va_attr.va_buf   = bp;
	va_attr.va_flags = IBT_VA_FMR | IBT_VA_BUF;

	page_sz = rds_ibdev->hca_attr.hca_page_sz * 1024; /* KB to bytes */
	paddr_list_len = (bp->b_bcount / page_sz) + 2; /* start + end pg */
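	/*
	 * Worked example: a 1MB buffer on 4K pages needs 0x100000 /
	 * 4096 = 256 list entries, plus the two extra for partial
	 * first and last pages, so paddr_list_len = 258.
	 */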

	/* map user buffer to HCA address */
	ibt_status = ibt_map_mem_area(ib_get_ibt_hca_hdl(rds_ibdev->dev),
	    &va_attr, paddr_list_len, &reg_req, &ibmr->rc_ma_hdl);
	if (ibt_status != IBT_SUCCESS) {
		return (-ENOMEM);
	}

	/* use a free entry from the FMR pool to register the memory */
	ibt_status = ibt_register_physical_fmr(
	    ib_get_ibt_hca_hdl(rds_ibdev->dev), ibmr->fmr_pool_hdl,
	    &reg_req.fn_arg, &ibmr->rc_fmr_hdl, &ibmr->rc_mem_desc);
	if (ibt_status != IBT_SUCCESS) {
		(void) ibt_unmap_mem_area(ib_get_ibt_hca_hdl(rds_ibdev->dev),
		    ibmr->rc_ma_hdl);
		if (ibt_status == IBT_INSUFF_RESOURCE) {
			return (-ENOBUFS);
		}
		return (-EINVAL);
	}
	RDSV3_DPRINTF4("rdsv3_ib_map_fmr", "Return: ibmr: %p rkey: 0x%x",
	    ibmr, (uint32_t)ibmr->rc_mem_desc.pmd_rkey);
	return (0);
}

void
rdsv3_ib_sync_mr(void *trans_private, int direction)
{
	/* LINTED E_FUNC_SET_NOT_USED */
	void *l_trans_private = trans_private;
	/* LINTED E_FUNC_SET_NOT_USED */
	int l_direction = direction;

	/* FMR sync is not needed in Solaris on PCI-E systems */

	RDSV3_DPRINTF4("rdsv3_ib_sync_mr", "Enter:");
}

void
rdsv3_ib_flush_mrs(void)
{
	struct rdsv3_ib_device *rds_ibdev;

	RDSV3_DPRINTF4("rdsv3_ib_flush_mrs", "Enter:");

	RDSV3_FOR_EACH_LIST_NODE(rds_ibdev, &rdsv3_ib_devices, list) {
		if (rds_ibdev->fmr_pool_hdl) {
			(void) rdsv3_ib_flush_mr_pool(rds_ibdev,
			    rds_ibdev->fmr_pool_hdl, 0);
		}
	}
}

static void
__rdsv3_ib_teardown_mr(struct rdsv3_ib_mr *ibmr)
{
	RDSV3_DPRINTF4("__rdsv3_ib_teardown_mr",
	    "Enter: ibmr: %p umem_cookie %p", ibmr, ibmr->umem_cookie);

	/* unpin memory pages */
	(void) ddi_umem_unlock(ibmr->umem_cookie);
}

void
rdsv3_ib_free_mr(void *trans_private, int invalidate)
{
	struct rdsv3_ib_mr *ibmr = trans_private;
	struct rdsv3_ib_device *rds_ibdev = ibmr->device;

	RDSV3_DPRINTF4("rdsv3_ib_free_mr", "Enter: ibmr: %p inv: %d",
	    ibmr, invalidate);

	/* return the FMR to the IBTF pool */
	/* the final invalidation comes from ibt_flush_fmr_pool() */
	(void) ibt_deregister_fmr(ib_get_ibt_hca_hdl(rds_ibdev->dev),
	    ibmr->rc_fmr_hdl);
	(void) ibt_unmap_mem_area(ib_get_ibt_hca_hdl(rds_ibdev->dev),
	    ibmr->rc_ma_hdl);
	__rdsv3_ib_teardown_mr(ibmr);
	if (invalidate) {
		rds_ibdev = ibmr->device;
		(void) rdsv3_ib_flush_mr_pool(rds_ibdev,
		    rds_ibdev->fmr_pool_hdl, 0);
	}
	kmem_free((void *) ibmr, sizeof (*ibmr));
}

static int
rdsv3_ib_flush_mr_pool(struct rdsv3_ib_device *rds_ibdev,
    ibt_fmr_pool_hdl_t pool_hdl, int free_all)
{
	/* LINTED E_FUNC_SET_NOT_USED */
	int l_free_all = free_all;

	RDSV3_DPRINTF4("rdsv3_ib_flush_mr_pool", "Enter: pool: %p", pool_hdl);

	rdsv3_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	(void) ibt_flush_fmr_pool(ib_get_ibt_hca_hdl(rds_ibdev->dev),
	    pool_hdl);
	return (0);
}