1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * hermon_umap.c
29  *    Hermon Userland Mapping Routines
30  *
31  *    Implements all the routines necessary for enabling direct userland
32  *    access to the Hermon hardware.  This includes all routines necessary for
33  *    maintaining the "userland resources database" and all the support routines
34  *    for the devmap calls.
35  */
36 
37 #include <sys/types.h>
38 #include <sys/conf.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/modctl.h>
42 #include <sys/file.h>
43 #include <sys/avl.h>
44 #include <sys/sysmacros.h>
45 
46 #include <sys/ib/adapters/hermon/hermon.h>
47 
48 /* Hermon HCA state pointer (extern) */
49 extern void *hermon_statep;
50 
51 /* Hermon HCA Userland Resource Database (extern) */
52 extern hermon_umap_db_t hermon_userland_rsrc_db;
53 
54 static int hermon_umap_uarpg(hermon_state_t *state, devmap_cookie_t dhp,
55     hermon_rsrc_t *rsrcp, uint64_t offset, size_t *maplen, int *err);
56 static int hermon_umap_cqmem(hermon_state_t *state, devmap_cookie_t dhp,
57     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
58 static int hermon_umap_qpmem(hermon_state_t *state, devmap_cookie_t dhp,
59     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
60 static int hermon_umap_srqmem(hermon_state_t *state, devmap_cookie_t dhp,
61     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
62 static int hermon_umap_dbrecmem(hermon_state_t *state, devmap_cookie_t dhp,
63     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err);
64 static int hermon_devmap_umem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
65     offset_t off, size_t len, void **pvtp);
66 static int hermon_devmap_umem_dup(devmap_cookie_t dhp, void *pvtp,
67     devmap_cookie_t new_dhp, void **new_pvtp);
68 static void hermon_devmap_umem_unmap(devmap_cookie_t dhp, void *pvtp,
69     offset_t off, size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
70     devmap_cookie_t new_dhp2, void **pvtp2);
71 static int hermon_devmap_dbrecmem_map(devmap_cookie_t dhp, dev_t dev,
72     uint_t flags, offset_t off, size_t len, void **pvtp);
73 static int hermon_devmap_dbrecmem_dup(devmap_cookie_t dhp, void *pvtp,
74     devmap_cookie_t new_dhp, void **new_pvtp);
75 static void hermon_devmap_dbrecmem_unmap(devmap_cookie_t dhp, void *pvtp,
76     offset_t off, size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
77     devmap_cookie_t new_dhp2, void **pvtp2);
78 static int hermon_devmap_devmem_map(devmap_cookie_t dhp, dev_t dev,
79     uint_t flags, offset_t off, size_t len, void **pvtp);
80 static int hermon_devmap_devmem_dup(devmap_cookie_t dhp, void *pvtp,
81     devmap_cookie_t new_dhp, void **new_pvtp);
82 static void hermon_devmap_devmem_unmap(devmap_cookie_t dhp, void *pvtp,
83     offset_t off, size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
84     devmap_cookie_t new_dhp2, void **pvtp2);
85 static ibt_status_t hermon_umap_mr_data_in(hermon_mrhdl_t mr,
86     ibt_mr_data_in_t *data, size_t data_sz);
87 static ibt_status_t hermon_umap_cq_data_out(hermon_cqhdl_t cq,
88     mlnx_umap_cq_data_out_t *data, size_t data_sz);
89 static ibt_status_t hermon_umap_qp_data_out(hermon_qphdl_t qp,
90     mlnx_umap_qp_data_out_t *data, size_t data_sz);
91 static ibt_status_t hermon_umap_srq_data_out(hermon_srqhdl_t srq,
92     mlnx_umap_srq_data_out_t *data, size_t data_sz);
93 static ibt_status_t hermon_umap_pd_data_out(hermon_pdhdl_t pd,
94     mlnx_umap_pd_data_out_t *data, size_t data_sz);
95 static int hermon_umap_db_compare(const void *query, const void *entry);
96 
97 
98 /*
99  * These callbacks are passed to devmap_umem_setup() and devmap_devmem_setup(),
100  * respectively.  They are used to handle (among other things) partial
101  * unmappings and to provide a method for invalidating mappings inherited
102  * as a result of a fork(2) system call.
103  */
104 static struct devmap_callback_ctl hermon_devmap_umem_cbops = {
105 	DEVMAP_OPS_REV,
106 	hermon_devmap_umem_map,
107 	NULL,
108 	hermon_devmap_umem_dup,
109 	hermon_devmap_umem_unmap
110 };
111 static struct devmap_callback_ctl hermon_devmap_devmem_cbops = {
112 	DEVMAP_OPS_REV,
113 	hermon_devmap_devmem_map,
114 	NULL,
115 	hermon_devmap_devmem_dup,
116 	hermon_devmap_devmem_unmap
117 };
118 static struct devmap_callback_ctl hermon_devmap_dbrecmem_cbops = {
119 	DEVMAP_OPS_REV,
120 	hermon_devmap_dbrecmem_map,
121 	NULL,
122 	hermon_devmap_dbrecmem_dup,
123 	hermon_devmap_dbrecmem_unmap
124 };
125 
126 /*
127  * hermon_devmap()
128  *    Context: Can be called from user context.
129  */
130 /* ARGSUSED */
131 int
132 hermon_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
133     size_t *maplen, uint_t model)
134 {
135 	hermon_state_t	*state;
136 	hermon_rsrc_t 	*rsrcp;
137 	minor_t		instance;
138 	uint64_t	key, value;
139 	uint64_t	bf_offset = 0;
140 	uint_t		type;
141 	int		err, status;
142 
143 	/* Get Hermon softstate structure from instance */
144 	instance = HERMON_DEV_INSTANCE(dev);
145 	state = ddi_get_soft_state(hermon_statep, instance);
146 	if (state == NULL) {
147 		return (ENXIO);
148 	}
149 
150 	/*
151 	 * Access to Hermon devmap interface is not allowed in
152 	 * "maintenance mode".
153 	 */
154 	if (state->hs_operational_mode == HERMON_MAINTENANCE_MODE) {
155 		return (EFAULT);
156 	}
157 
158 	/*
159 	 * The bottom bits of "offset" are undefined (number depends on
160 	 * system PAGESIZE).  Shifting these off leaves us with a "key".
161 	 * The "key" is actually a combination of both a real key value
162 	 * (for the purpose of database lookup) and a "type" value.  We
163 	 * extract this information before doing the database lookup.
164 	 */
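	/*
	 * Illustrative example (a sketch, not additional logic): a CQ
	 * numbered, say, 0x10 is handed back to userland by
	 * hermon_umap_cq_data_out() below as the mmap() offset
	 * (((0x10 << MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_CQMEM_RSRC) <<
	 * PAGESHIFT).  The shift and mask below simply reverse that
	 * encoding, yielding type == MLNX_UMAP_CQMEM_RSRC and key == 0x10.
	 */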
165 	key  = off >> PAGESHIFT;
166 	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
167 	key  = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
168 	if (type == MLNX_UMAP_BLUEFLAMEPG_RSRC) {
169 		if (state->hs_devlim.blu_flm == 0) {
170 			return (EFAULT);
171 		}
172 		bf_offset = state->hs_bf_offset;
173 		type = MLNX_UMAP_UARPG_RSRC;
174 	}
175 	status = hermon_umap_db_find(instance, key, type, &value, 0, NULL);
176 	if (status == DDI_SUCCESS) {
177 		rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
178 
179 		switch (type) {
180 		case MLNX_UMAP_UARPG_RSRC:
181 			/*
182 			 * Double-check that the process that open()'d Hermon is the
183 			 * same process attempting to mmap() the UAR page.
184 			 */
185 			if (key != ddi_get_pid()) {
186 				return (EINVAL);
187 			}
188 
189 			/* Map the UAR page out for userland access */
190 			status = hermon_umap_uarpg(state, dhp, rsrcp, bf_offset,
191 			    maplen, &err);
192 			if (status != DDI_SUCCESS) {
193 				return (err);
194 			}
195 			break;
196 
197 		case MLNX_UMAP_CQMEM_RSRC:
198 			/* Map the CQ memory out for userland access */
199 			status = hermon_umap_cqmem(state, dhp, rsrcp, off,
200 			    maplen, &err);
201 			if (status != DDI_SUCCESS) {
202 				return (err);
203 			}
204 			break;
205 
206 		case MLNX_UMAP_QPMEM_RSRC:
207 			/* Map the QP memory out for userland access */
208 			status = hermon_umap_qpmem(state, dhp, rsrcp, off,
209 			    maplen, &err);
210 			if (status != DDI_SUCCESS) {
211 				return (err);
212 			}
213 			break;
214 
215 		case MLNX_UMAP_SRQMEM_RSRC:
216 			/* Map the SRQ memory out for userland access */
217 			status = hermon_umap_srqmem(state, dhp, rsrcp, off,
218 			    maplen, &err);
219 			if (status != DDI_SUCCESS) {
220 				return (err);
221 			}
222 			break;
223 
224 		case MLNX_UMAP_DBRMEM_RSRC:
225 			/*
226 			 * Map the doorbell record memory out for
227 			 * userland access
228 			 */
229 			status = hermon_umap_dbrecmem(state, dhp, rsrcp, off,
230 			    maplen, &err);
231 			if (status != DDI_SUCCESS) {
232 				return (err);
233 			}
234 			break;
235 
236 		default:
237 			HERMON_WARNING(state, "unexpected rsrc type in devmap");
238 			return (EINVAL);
239 		}
240 	} else {
241 		return (EINVAL);
242 	}
243 
244 	return (0);
245 }
246 
247 
248 /*
249  * hermon_umap_uarpg()
250  *    Context: Can be called from user context.
251  */
252 static int
253 hermon_umap_uarpg(hermon_state_t *state, devmap_cookie_t dhp,
254     hermon_rsrc_t *rsrcp, uint64_t offset, size_t *maplen, int *err)
255 {
256 	int			status;
257 	uint_t			maxprot;
258 	ddi_device_acc_attr_t	*accattrp = &state->hs_reg_accattr;
259 	ddi_device_acc_attr_t	accattr;
260 
261 	if (offset != 0) {	/* Hermon Blueflame */
262 		/* Try to use write coalescing data ordering */
263 		accattr = *accattrp;
264 		accattr.devacc_attr_dataorder = DDI_STORECACHING_OK_ACC;
265 		accattrp = &accattr;
266 	}
267 
268 	/* Map out the UAR page (doorbell page) */
269 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
270 	status = devmap_devmem_setup(dhp, state->hs_dip,
271 	    &hermon_devmap_devmem_cbops, HERMON_UAR_BAR, (rsrcp->hr_indx <<
272 	    PAGESHIFT) + offset, PAGESIZE, maxprot, DEVMAP_ALLOW_REMAP,
273 	    accattrp);
274 	if (status < 0) {
275 		*err = status;
276 		return (DDI_FAILURE);
277 	}
278 
279 	*maplen = PAGESIZE;
280 	return (DDI_SUCCESS);
281 }
282 
283 
284 /*
285  * hermon_umap_cqmem()
286  *    Context: Can be called from user context.
287  */
288 /* ARGSUSED */
289 static int
290 hermon_umap_cqmem(hermon_state_t *state, devmap_cookie_t dhp,
291     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
292 {
293 	hermon_cqhdl_t	cq;
294 	size_t		size;
295 	uint_t		maxprot;
296 	int		status;
297 
298 	/* Extract the Hermon CQ handle pointer from the hermon_rsrc_t */
299 	cq = (hermon_cqhdl_t)rsrcp->hr_addr;
300 
301 	/* Round up the CQ size to the system page size */
302 	size = ptob(btopr(cq->cq_resize_hdl ?
303 	    cq->cq_resize_hdl->cq_cqinfo.qa_size : cq->cq_cqinfo.qa_size));
304 
305 	/* Map out the CQ memory - use resize_hdl if non-NULL */
306 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
307 	status = devmap_umem_setup(dhp, state->hs_dip,
308 	    &hermon_devmap_umem_cbops, cq->cq_resize_hdl ?
309 	    cq->cq_resize_hdl->cq_cqinfo.qa_umemcookie :
310 	    cq->cq_cqinfo.qa_umemcookie, 0, size,
311 	    maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
312 	if (status < 0) {
313 		*err = status;
314 		return (DDI_FAILURE);
315 	}
316 	*maplen = size;
317 
318 	return (DDI_SUCCESS);
319 }
320 
321 
322 /*
323  * hermon_umap_qpmem()
324  *    Context: Can be called from user context.
325  */
326 /* ARGSUSED */
327 static int
328 hermon_umap_qpmem(hermon_state_t *state, devmap_cookie_t dhp,
329     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
330 {
331 	hermon_qphdl_t	qp;
332 	offset_t	offset;
333 	size_t		size;
334 	uint_t		maxprot;
335 	int		status;
336 
337 	/* Extract the Hermon QP handle pointer from the hermon_rsrc_t */
338 	qp = (hermon_qphdl_t)rsrcp->hr_addr;
339 
340 	/*
341 	 * Calculate the offset of the first work queue (send or recv) into
342 	 * the memory (ddi_umem_alloc()) allocated previously for the QP.
343 	 */
344 	offset = (offset_t)((uintptr_t)qp->qp_wqinfo.qa_buf_aligned -
345 	    (uintptr_t)qp->qp_wqinfo.qa_buf_real);
346 
347 	/* Round up the QP work queue sizes to the system page size */
348 	size = ptob(btopr(qp->qp_wqinfo.qa_size));
349 
350 	/* Map out the QP memory */
351 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
352 	status = devmap_umem_setup(dhp, state->hs_dip,
353 	    &hermon_devmap_umem_cbops, qp->qp_wqinfo.qa_umemcookie, offset,
354 	    size, maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
355 	if (status < 0) {
356 		*err = status;
357 		return (DDI_FAILURE);
358 	}
359 	*maplen = size;
360 
361 	return (DDI_SUCCESS);
362 }
363 
364 
365 /*
366  * hermon_umap_srqmem()
367  *    Context: Can be called from user context.
368  */
369 /* ARGSUSED */
370 static int
371 hermon_umap_srqmem(hermon_state_t *state, devmap_cookie_t dhp,
372     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
373 {
374 	hermon_srqhdl_t	srq;
375 	offset_t	offset;
376 	size_t		size;
377 	uint_t		maxprot;
378 	int		status;
379 
380 	/* Extract the Hermon SRQ handle pointer from the hermon_rsrc_t */
381 	srq = (hermon_srqhdl_t)rsrcp->hr_addr;
382 
383 	/*
384 	 * Calculate the offset of the first shared recv queue into the memory
385 	 * (ddi_umem_alloc()) allocated previously for the SRQ.
386 	 */
387 	offset = (offset_t)((uintptr_t)srq->srq_wqinfo.qa_buf_aligned -
388 	    (uintptr_t)srq->srq_wqinfo.qa_buf_real);
389 
390 	/* Round up the SRQ work queue sizes to the system page size */
391 	size = ptob(btopr(srq->srq_wqinfo.qa_size));
392 
393 	/* Map out the SRQ memory */
394 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
395 	status = devmap_umem_setup(dhp, state->hs_dip,
396 	    &hermon_devmap_umem_cbops, srq->srq_wqinfo.qa_umemcookie, offset,
397 	    size, maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
398 	if (status < 0) {
399 		*err = status;
400 		return (DDI_FAILURE);
401 	}
402 	*maplen = size;
403 
404 	return (DDI_SUCCESS);
405 }
406 
407 
408 /*
409  * hermon_umap_dbrecmem()
410  *    Context: Can be called from user context.
411  */
412 /* ARGSUSED */
413 static int
414 hermon_umap_dbrecmem(hermon_state_t *state, devmap_cookie_t dhp,
415     hermon_rsrc_t *rsrcp, offset_t off, size_t *maplen, int *err)
416 {
417 	hermon_udbr_page_t *pagep;
418 	offset_t	offset;
419 	size_t		size;
420 	uint_t		maxprot;
421 	int		status;
422 
423 	/* We stored the udbr_page pointer, and not a hermon_rsrc_t */
424 	pagep = (hermon_udbr_page_t *)rsrcp;
425 
426 	/*
427 	 * The doorbell records start at the beginning of the memory
428 	 * (ddi_umem_alloc()) allocated previously for them.
429 	 */
430 	offset = 0;
431 
432 	/* Round up the doorbell page to the system page size */
433 	size = PAGESIZE;
434 
435 	/* Map out the Doorbell Record memory */
436 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
437 	status = devmap_umem_setup(dhp, state->hs_dip,
438 	    &hermon_devmap_dbrecmem_cbops, pagep->upg_umemcookie, offset,
439 	    size, maxprot, (DEVMAP_ALLOW_REMAP | DEVMAP_DEFAULTS), NULL);
440 	if (status < 0) {
441 		*err = status;
442 		return (DDI_FAILURE);
443 	}
444 	*maplen = size;
445 
446 	return (DDI_SUCCESS);
447 }
448 
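/*
 * Illustrative note (a sketch of the expected userland flow, not a
 * definitive interface description): the doorbell record mapping offset
 * is returned to userland in fields such as mcq_polldbr_mapoffset and
 * mqp_rdbr_mapoffset (see the *_data_out() routines below).  A consumer
 * is expected to mmap() PAGESIZE bytes at that offset, which arrives
 * back in hermon_devmap() with type MLNX_UMAP_DBRMEM_RSRC and is mapped
 * by hermon_umap_dbrecmem() above.
 */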
449 
450 /*
451  * hermon_devmap_umem_map()
452  *    Context: Can be called from kernel context.
453  */
454 /* ARGSUSED */
455 static int
456 hermon_devmap_umem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
457     offset_t off, size_t len, void **pvtp)
458 {
459 	hermon_state_t		*state;
460 	hermon_devmap_track_t	*dvm_track;
461 	hermon_cqhdl_t		cq;
462 	hermon_qphdl_t		qp;
463 	hermon_srqhdl_t		srq;
464 	minor_t			instance;
465 	uint64_t		key;
466 	uint_t			type;
467 
468 	/* Get Hermon softstate structure from instance */
469 	instance = HERMON_DEV_INSTANCE(dev);
470 	state = ddi_get_soft_state(hermon_statep, instance);
471 	if (state == NULL) {
472 		return (ENXIO);
473 	}
474 
475 	/*
476 	 * The bottom bits of "offset" are undefined (number depends on
477 	 * system PAGESIZE).  Shifting these off leaves us with a "key".
478 	 * The "key" is actually a combination of both a real key value
479 	 * (for the purpose of database lookup) and a "type" value.  Although
480 	 * we are not going to do any database lookup per se, we do want
481 	 * to extract the "key" and the "type" (to enable faster lookup of
482 	 * the appropriate CQ or QP handle).
483 	 */
484 	key  = off >> PAGESHIFT;
485 	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
486 	key  = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
487 
488 	/*
489 	 * Allocate an entry to track the mapping and unmapping (specifically,
490 	 * partial unmapping) of this resource.
491 	 */
492 	dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
493 	    sizeof (hermon_devmap_track_t), KM_SLEEP);
494 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
495 	dvm_track->hdt_offset = off;
496 	dvm_track->hdt_state  = state;
497 	dvm_track->hdt_refcnt = 1;
498 	mutex_init(&dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
499 	    DDI_INTR_PRI(state->hs_intrmsi_pri));
500 
501 	/*
502 	 * Depending on the type of resource that has been mapped out, we
503 	 * need to update the QP or CQ handle to reflect that it has, in
504 	 * fact, been mapped.  This allows the driver code which frees a QP
505 	 * or a CQ to know whether it is appropriate to do a
506 	 * devmap_devmem_remap() to invalidate the userland mapping for the
507 	 * corresponding queue's memory.
508 	 */
509 	if (type == MLNX_UMAP_CQMEM_RSRC) {
510 
511 		/* Use "key" (CQ number) to do fast lookup of CQ handle */
512 		cq = hermon_cqhdl_from_cqnum(state, key);
513 
514 		/*
515 		 * Update the handle to the userland mapping.  Note:  If
516 		 * the CQ already has a valid userland mapping, then stop
517 		 * and return failure.
518 		 */
519 		mutex_enter(&cq->cq_lock);
520 		if (cq->cq_umap_dhp == NULL) {
521 			cq->cq_umap_dhp = dhp;
522 			dvm_track->hdt_size = cq->cq_cqinfo.qa_size;
523 			mutex_exit(&cq->cq_lock);
524 		} else if (cq->cq_resize_hdl &&
525 		    (cq->cq_resize_hdl->cq_umap_dhp == NULL)) {
526 			cq->cq_resize_hdl->cq_umap_dhp = dhp;
527 			dvm_track->hdt_size =
528 			    cq->cq_resize_hdl->cq_cqinfo.qa_size;
529 			mutex_exit(&cq->cq_lock);
530 		} else {
531 			mutex_exit(&cq->cq_lock);
532 			goto umem_map_fail;
533 		}
534 
535 	} else if (type == MLNX_UMAP_QPMEM_RSRC) {
536 
537 		/* Use "key" (QP number) to do fast lookup of QP handle */
538 		qp = hermon_qphdl_from_qpnum(state, key);
539 
540 		/*
541 		 * Update the handle to the userland mapping.  Note:  If
542 		 * the QP already has a valid userland mapping, then stop
543 		 * and return failure.
544 		 */
545 		mutex_enter(&qp->qp_lock);
546 		if (qp->qp_umap_dhp == NULL) {
547 			qp->qp_umap_dhp = dhp;
548 			dvm_track->hdt_size = qp->qp_wqinfo.qa_size;
549 			mutex_exit(&qp->qp_lock);
550 		} else {
551 			mutex_exit(&qp->qp_lock);
552 			goto umem_map_fail;
553 		}
554 
555 	} else if (type == MLNX_UMAP_SRQMEM_RSRC) {
556 
557 		/* Use "key" (SRQ number) to do fast lookup on SRQ handle */
558 		srq = hermon_srqhdl_from_srqnum(state, key);
559 
560 		/*
561 		 * Update the handle to the userland mapping.  Note:  If the
562 		 * SRQ already has a valid userland mapping, then stop and
563 		 * return failure.
564 		 */
565 		mutex_enter(&srq->srq_lock);
566 		if (srq->srq_umap_dhp == NULL) {
567 			srq->srq_umap_dhp = dhp;
568 			dvm_track->hdt_size = srq->srq_wqinfo.qa_size;
569 			mutex_exit(&srq->srq_lock);
570 		} else {
571 			mutex_exit(&srq->srq_lock);
572 			goto umem_map_fail;
573 		}
574 	}
575 
576 	/*
577 	 * Pass the private "Hermon devmap tracking structure" back.  This
578 	 * pointer will be returned in subsequent "unmap" callbacks.
579 	 */
580 	*pvtp = dvm_track;
581 
582 	return (DDI_SUCCESS);
583 
584 umem_map_fail:
585 	mutex_destroy(&dvm_track->hdt_lock);
586 	kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
587 	return (DDI_FAILURE);
588 }
589 
590 
591 /*
592  * hermon_devmap_umem_dup()
593  *    Context: Can be called from kernel context.
594  */
595 /* ARGSUSED */
596 static int
597 hermon_devmap_umem_dup(devmap_cookie_t dhp, void *pvtp, devmap_cookie_t new_dhp,
598     void **new_pvtp)
599 {
600 	hermon_state_t		*state;
601 	hermon_devmap_track_t	*dvm_track, *new_dvm_track;
602 	uint_t			maxprot;
603 	int			status;
604 
605 	/*
606 	 * Extract the Hermon softstate pointer from "Hermon devmap tracking
607 	 * structure" (in "pvtp").
608 	 */
609 	dvm_track = (hermon_devmap_track_t *)pvtp;
610 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
611 	state = dvm_track->hdt_state;
612 
613 	/*
614 	 * Since this devmap_dup() entry point is generally called
615 	 * when a process does fork(2), it is incumbent upon the driver
616 	 * to ensure that the child does not inherit a valid copy of
617 	 * the parent's QP or CQ resource.  This is accomplished by using
618 	 * devmap_devmem_remap() to invalidate the child's mapping to the
619 	 * kernel memory.
620 	 */
621 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
622 	status = devmap_devmem_remap(new_dhp, state->hs_dip, 0, 0,
623 	    dvm_track->hdt_size, maxprot, DEVMAP_MAPPING_INVALID, NULL);
624 	if (status != DDI_SUCCESS) {
625 		HERMON_WARNING(state, "failed in hermon_devmap_umem_dup()");
626 		return (status);
627 	}
628 
629 	/*
630 	 * Allocate a new entry to track the subsequent unmapping
631 	 * (specifically, all partial unmappings) of the child's newly
632 	 * invalidated resource.  Note: Setting the "hdt_size" field to
633 	 * zero here is an indication to the devmap_unmap() entry point
634 	 * that this mapping is invalid, and that its subsequent unmapping
635 	 * should not affect any of the parent's CQ or QP resources.
636 	 */
637 	new_dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
638 	    sizeof (hermon_devmap_track_t), KM_SLEEP);
639 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*new_dvm_track))
640 	new_dvm_track->hdt_offset = 0;
641 	new_dvm_track->hdt_state  = state;
642 	new_dvm_track->hdt_refcnt = 1;
643 	new_dvm_track->hdt_size	  = 0;
644 	mutex_init(&new_dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
645 	    DDI_INTR_PRI(state->hs_intrmsi_pri));
646 	*new_pvtp = new_dvm_track;
647 
648 	return (DDI_SUCCESS);
649 }
650 
651 
652 /*
653  * hermon_devmap_umem_unmap()
654  *    Context: Can be called from kernel context.
655  */
656 /* ARGSUSED */
657 static void
658 hermon_devmap_umem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
659     size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
660     devmap_cookie_t new_dhp2, void **pvtp2)
661 {
662 	hermon_state_t 		*state;
663 	hermon_rsrc_t 		*rsrcp;
664 	hermon_devmap_track_t	*dvm_track;
665 	hermon_cqhdl_t		cq;
666 	hermon_qphdl_t		qp;
667 	hermon_srqhdl_t		srq;
668 	uint64_t		key, value;
669 	uint_t			type;
670 	uint_t			size;
671 	int			status;
672 
673 	/*
674 	 * Extract the Hermon softstate pointer from "Hermon devmap tracking
675 	 * structure" (in "pvtp").
676 	 */
677 	dvm_track = (hermon_devmap_track_t *)pvtp;
678 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
679 	state	  = dvm_track->hdt_state;
680 
681 	/*
682 	 * Extract the "offset" from the "Hermon devmap tracking structure".
683 	 * Note: The input argument "off" is ignored here because the
684 	 * Hermon mapping interfaces define a very specific meaning to
685 	 * each "logical offset".  Also extract the "key" and "type" encoded
686 	 * in the logical offset.
687 	 */
688 	key  = dvm_track->hdt_offset >> PAGESHIFT;
689 	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
690 	key  = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
691 
692 	/*
693 	 * Extract the "size" of the mapping.  If this size is determined
694 	 * to be zero, then it is an indication of a previously invalidated
695 	 * mapping, and no CQ or QP resources should be affected.
696 	 */
697 	size = dvm_track->hdt_size;
698 
699 	/*
700 	 * If only the "middle" portion of a given mapping is being unmapped,
701 	 * then we are effectively creating one new piece of mapped memory.
702 	 * (The original region is divided into three pieces, of which the
703 	 * middle piece is being removed.  This leaves two pieces.)  Since we
704 	 * started with one piece and now have two pieces, we need to increment
705 	 * the counter in the "Hermon devmap tracking structure".
706 	 *
707 	 * If, however, the whole mapped region is being unmapped, then we
708 	 * have started with one region which we are completely removing.
709 	 * In this case, we need to decrement the counter in the "Hermon
710 	 * devmap tracking structure".
711 	 *
712 	 * In each of the remaining cases, we will have started with one
713 	 * mapped region and ended with one (different) region.  So no counter
714 	 * modification is necessary.
715 	 */
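	/*
	 * Sketch of how these cases map onto the unmap callback arguments
	 * (based on the standard devmap_unmap(9E) semantics): both
	 * new_dhp1 and new_dhp2 NULL means the entire region was unmapped
	 * (decrement); both non-NULL means a middle piece was removed,
	 * leaving two regions (increment); exactly one non-NULL means only
	 * the head or tail was trimmed, leaving one region (no change).
	 */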
716 	mutex_enter(&dvm_track->hdt_lock);
717 	if ((new_dhp1 == NULL) && (new_dhp2 == NULL)) {
718 		dvm_track->hdt_refcnt--;
719 	} else if ((new_dhp1 != NULL) && (new_dhp2 != NULL)) {
720 		dvm_track->hdt_refcnt++;
721 	}
722 	mutex_exit(&dvm_track->hdt_lock);
723 
724 	/*
725 	 * For each of the cases where the region is being divided, we
726 	 * need to pass back the "Hermon devmap tracking structure".  This way
727 	 * we get it back when each of the remaining pieces is subsequently
728 	 * unmapped.
729 	 */
730 	if (new_dhp1 != NULL) {
731 		*pvtp1 = pvtp;
732 	}
733 	if (new_dhp2 != NULL) {
734 		*pvtp2 = pvtp;
735 	}
736 
737 	/*
738 	 * If the "Hermon devmap tracking structure" is no longer being
739 	 * referenced, then free it up.  Otherwise, return.
740 	 */
741 	if (dvm_track->hdt_refcnt == 0) {
742 		mutex_destroy(&dvm_track->hdt_lock);
743 		kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
744 
745 		/*
746 		 * If the mapping was invalid (see explanation above), then
747 		 * no further processing is necessary.
748 		 */
749 		if (size == 0) {
750 			return;
751 		}
752 	} else {
753 		return;
754 	}
755 
756 	/*
757 	 * Now that we can guarantee that the user memory is fully unmapped,
758 	 * we can use the "key" and "type" values to try to find the entry
759 	 * in the "userland resources database".  If it's found, then it
760 	 * indicates that the queue memory (CQ or QP) has not yet been freed.
761 	 * In this case, we update the corresponding CQ or QP handle to
762 	 * indicate that the "devmap_devmem_remap()" call will be unnecessary.
763 	 * If it's _not_ found, then it indicates that the CQ or QP memory
764 	 * was, in fact, freed before it was unmapped (thus requiring a
765 	 * previous invalidation by remapping - which will already have
766 	 * been done in the free routine).
767 	 */
768 	status = hermon_umap_db_find(state->hs_instance, key, type, &value,
769 	    0, NULL);
770 	if (status == DDI_SUCCESS) {
771 		/*
772 		 * Depending on the type of the mapped resource (CQ or QP),
773 		 * update handle to indicate that no invalidation remapping
774 		 * will be necessary.
775 		 */
776 		if (type == MLNX_UMAP_CQMEM_RSRC) {
777 
778 			/* Use "value" to convert to CQ handle */
779 			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
780 			cq = (hermon_cqhdl_t)rsrcp->hr_addr;
781 
782 			/*
783 			 * Invalidate the handle to the userland mapping.
784 			 * Note: We must ensure that the mapping being
785 			 * unmapped here is the current one for the CQ.  It
786 			 * is possible that it might not be if this CQ has
787 			 * been resized and the previous CQ memory has not
788 			 * yet been unmapped.  But in that case, because of
789 			 * the devmap_devmem_remap(), there is no longer any
790 			 * association between the mapping and the real CQ
791 			 * kernel memory.
792 			 */
793 			mutex_enter(&cq->cq_lock);
794 			if (cq->cq_umap_dhp == dhp) {
795 				cq->cq_umap_dhp = NULL;
796 				if (cq->cq_resize_hdl) {
797 					/* resize is DONE, switch queues */
798 					hermon_cq_resize_helper(state, cq);
799 				}
800 			} else {
801 				if (cq->cq_resize_hdl &&
802 				    cq->cq_resize_hdl->cq_umap_dhp == dhp) {
803 					/*
804 					 * Unexpected case.  munmap of the
805 					 * cq_resize buf, and not the
806 					 * original buf.
807 					 */
808 					cq->cq_resize_hdl->cq_umap_dhp = NULL;
809 				}
810 			}
811 			mutex_exit(&cq->cq_lock);
812 
813 		} else if (type == MLNX_UMAP_QPMEM_RSRC) {
814 
815 			/* Use "value" to convert to QP handle */
816 			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
817 			qp = (hermon_qphdl_t)rsrcp->hr_addr;
818 
819 			/*
820 			 * Invalidate the handle to the userland mapping.
821 			 * Note: we ensure that the mapping being unmapped
822 			 * here is the current one for the QP.  This is
823 			 * more of a sanity check here since, unlike CQs
824 			 * (above) we do not support resize of QPs.
825 			 */
826 			mutex_enter(&qp->qp_lock);
827 			if (qp->qp_umap_dhp == dhp) {
828 				qp->qp_umap_dhp = NULL;
829 			}
830 			mutex_exit(&qp->qp_lock);
831 
832 		} else if (type == MLNX_UMAP_SRQMEM_RSRC) {
833 
834 			/* Use "value" to convert to SRQ handle */
835 			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
836 			srq = (hermon_srqhdl_t)rsrcp->hr_addr;
837 
838 			/*
839 			 * Invalidate the handle to the userland mapping.
840 			 * Note: we ensure that the mapping being unmapped
841 			 * here is the current one for the SRQ.  This is
842 			 * more of a sanity check here since, unlike CQs
843 			 * (above) we do not support resize of SRQs.
844 			 */
845 			mutex_enter(&srq->srq_lock);
846 			if (srq->srq_umap_dhp == dhp) {
847 				srq->srq_umap_dhp = NULL;
848 			}
849 			mutex_exit(&srq->srq_lock);
850 		}
851 	}
852 }
853 
854 
855 /*
856  * hermon_devmap_dbrecmem_map()
857  *    Context: Can be called from kernel context.
858  */
859 /* ARGSUSED */
860 static int
861 hermon_devmap_dbrecmem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
862     offset_t off, size_t len, void **pvtp)
863 {
864 	hermon_state_t		*state;
865 	hermon_devmap_track_t	*dvm_track;
866 	hermon_cqhdl_t		cq;
867 	hermon_qphdl_t		qp;
868 	hermon_srqhdl_t		srq;
869 	minor_t			instance;
870 	uint64_t		key;
871 	uint_t			type;
872 
873 	/* Get Hermon softstate structure from instance */
874 	instance = HERMON_DEV_INSTANCE(dev);
875 	state = ddi_get_soft_state(hermon_statep, instance);
876 	if (state == NULL) {
877 		return (ENXIO);
878 	}
879 
880 	/*
881 	 * The bottom bits of "offset" are undefined (number depends on
882 	 * system PAGESIZE).  Shifting these off leaves us with a "key".
883 	 * The "key" is actually a combination of both a real key value
884 	 * (for the purpose of database lookup) and a "type" value.  Although
885 	 * we are not going to do any database lookup per se, we do want
886 	 * to extract the "key" and the "type" (to enable faster lookup of
887 	 * the appropriate CQ or QP handle).
888 	 */
889 	key  = off >> PAGESHIFT;
890 	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
891 	key  = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
892 
893 	/*
894 	 * Allocate an entry to track the mapping and unmapping (specifically,
895 	 * partial unmapping) of this resource.
896 	 */
897 	dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
898 	    sizeof (hermon_devmap_track_t), KM_SLEEP);
899 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
900 	dvm_track->hdt_offset = off;
901 	dvm_track->hdt_state  = state;
902 	dvm_track->hdt_refcnt = 1;
903 	mutex_init(&dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
904 	    DDI_INTR_PRI(state->hs_intrmsi_pri));
905 
906 	/*
907 	 * Depending on the type of resource that has been mapped out, we
908 	 * need to update the QP or CQ handle to reflect that it has, in
909 	 * fact, been mapped.  This allows the driver code which frees a QP
910 	 * or a CQ to know whether it is appropriate to do a
911 	 * devmap_devmem_remap() to invalidate the userland mapping for the
912 	 * corresponding queue's memory.
913 	 */
914 	if (type == MLNX_UMAP_CQMEM_RSRC) {
915 
916 		/* Use "key" (CQ number) to do fast lookup of CQ handle */
917 		cq = hermon_cqhdl_from_cqnum(state, key);
918 
919 		/*
920 		 * Update the handle to the userland mapping.  Note:  If
921 		 * the CQ already has a valid userland mapping, then stop
922 		 * and return failure.
923 		 */
924 		mutex_enter(&cq->cq_lock);
925 		if (cq->cq_umap_dhp == NULL) {
926 			cq->cq_umap_dhp = dhp;
927 			dvm_track->hdt_size = cq->cq_cqinfo.qa_size;
928 			mutex_exit(&cq->cq_lock);
929 		} else {
930 			mutex_exit(&cq->cq_lock);
931 			goto umem_map_fail;
932 		}
933 
934 	} else if (type == MLNX_UMAP_QPMEM_RSRC) {
935 
936 		/* Use "key" (QP number) to do fast lookup of QP handle */
937 		qp = hermon_qphdl_from_qpnum(state, key);
938 
939 		/*
940 		 * Update the handle to the userland mapping.  Note:  If
941 		 * the QP already has a valid userland mapping, then stop
942 		 * and return failure.
943 		 */
944 		mutex_enter(&qp->qp_lock);
945 		if (qp->qp_umap_dhp == NULL) {
946 			qp->qp_umap_dhp = dhp;
947 			dvm_track->hdt_size = qp->qp_wqinfo.qa_size;
948 			mutex_exit(&qp->qp_lock);
949 		} else {
950 			mutex_exit(&qp->qp_lock);
951 			goto umem_map_fail;
952 		}
953 
954 	} else if (type == MLNX_UMAP_SRQMEM_RSRC) {
955 
956 		/* Use "key" (SRQ number) to do fast lookup on SRQ handle */
957 		srq = hermon_srqhdl_from_srqnum(state, key);
958 
959 		/*
960 		 * Update the handle to the userland mapping.  Note:  If the
961 		 * SRQ already has a valid userland mapping, then stop and
962 		 * return failure.
963 		 */
964 		mutex_enter(&srq->srq_lock);
965 		if (srq->srq_umap_dhp == NULL) {
966 			srq->srq_umap_dhp = dhp;
967 			dvm_track->hdt_size = srq->srq_wqinfo.qa_size;
968 			mutex_exit(&srq->srq_lock);
969 		} else {
970 			mutex_exit(&srq->srq_lock);
971 			goto umem_map_fail;
972 		}
973 	}
974 
975 	/*
976 	 * Pass the private "Hermon devmap tracking structure" back.  This
977 	 * pointer will be returned in subsequent "unmap" callbacks.
978 	 */
979 	*pvtp = dvm_track;
980 
981 	return (DDI_SUCCESS);
982 
983 umem_map_fail:
984 	mutex_destroy(&dvm_track->hdt_lock);
985 	kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
986 	return (DDI_FAILURE);
987 }
988 
989 
990 /*
991  * hermon_devmap_dbrecmem_dup()
992  *    Context: Can be called from kernel context.
993  */
994 /* ARGSUSED */
995 static int
996 hermon_devmap_dbrecmem_dup(devmap_cookie_t dhp, void *pvtp,
997     devmap_cookie_t new_dhp, void **new_pvtp)
998 {
999 	hermon_state_t		*state;
1000 	hermon_devmap_track_t	*dvm_track, *new_dvm_track;
1001 	uint_t			maxprot;
1002 	int			status;
1003 
1004 	/*
1005 	 * Extract the Hermon softstate pointer from "Hermon devmap tracking
1006 	 * structure" (in "pvtp").
1007 	 */
1008 	dvm_track = (hermon_devmap_track_t *)pvtp;
1009 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1010 	state = dvm_track->hdt_state;
1011 
1012 	/*
1013 	 * Since this devmap_dup() entry point is generally called
1014 	 * when a process does fork(2), it is incumbent upon the driver
1015 	 * to ensure that the child does not inherit a valid copy of
1016 	 * the parent's QP or CQ resource.  This is accomplished by using
1017 	 * devmap_devmem_remap() to invalidate the child's mapping to the
1018 	 * kernel memory.
1019 	 */
1020 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
1021 	status = devmap_devmem_remap(new_dhp, state->hs_dip, 0, 0,
1022 	    dvm_track->hdt_size, maxprot, DEVMAP_MAPPING_INVALID, NULL);
1023 	if (status != DDI_SUCCESS) {
1024 		HERMON_WARNING(state, "failed in hermon_devmap_dbrecmem_dup()");
1025 		return (status);
1026 	}
1027 
1028 	/*
1029 	 * Allocate a new entry to track the subsequent unmapping
1030 	 * (specifically, all partial unmappings) of the child's newly
1031 	 * invalidated resource.  Note: Setting the "hdt_size" field to
1032 	 * zero here is an indication to the devmap_unmap() entry point
1033 	 * that this mapping is invalid, and that its subsequent unmapping
1034 	 * should not affect any of the parent's CQ or QP resources.
1035 	 */
1036 	new_dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
1037 	    sizeof (hermon_devmap_track_t), KM_SLEEP);
1038 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*new_dvm_track))
1039 	new_dvm_track->hdt_offset = 0;
1040 	new_dvm_track->hdt_state  = state;
1041 	new_dvm_track->hdt_refcnt = 1;
1042 	new_dvm_track->hdt_size	  = 0;
1043 	mutex_init(&new_dvm_track->hdt_lock, NULL, MUTEX_DRIVER,
1044 	    DDI_INTR_PRI(state->hs_intrmsi_pri));
1045 	*new_pvtp = new_dvm_track;
1046 
1047 	return (DDI_SUCCESS);
1048 }
1049 
1050 
1051 /*
1052  * hermon_devmap_dbrecmem_unmap()
1053  *    Context: Can be called from kernel context.
1054  */
1055 /* ARGSUSED */
1056 static void
1057 hermon_devmap_dbrecmem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
1058     size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
1059     devmap_cookie_t new_dhp2, void **pvtp2)
1060 {
1061 	hermon_state_t 		*state;
1062 	hermon_rsrc_t 		*rsrcp;
1063 	hermon_devmap_track_t	*dvm_track;
1064 	hermon_cqhdl_t		cq;
1065 	hermon_qphdl_t		qp;
1066 	hermon_srqhdl_t		srq;
1067 	uint64_t		key, value;
1068 	uint_t			type;
1069 	uint_t			size;
1070 	int			status;
1071 
1072 	/*
1073 	 * Extract the Hermon softstate pointer from "Hermon devmap tracking
1074 	 * structure" (in "pvtp").
1075 	 */
1076 	dvm_track = (hermon_devmap_track_t *)pvtp;
1077 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1078 	state	  = dvm_track->hdt_state;
1079 
1080 	/*
1081 	 * Extract the "offset" from the "Hermon devmap tracking structure".
1082 	 * Note: The input argument "off" is ignored here because the
1083 	 * Hermon mapping interfaces define a very specific meaning to
1084 	 * each "logical offset".  Also extract the "key" and "type" encoded
1085 	 * in the logical offset.
1086 	 */
1087 	key  = dvm_track->hdt_offset >> PAGESHIFT;
1088 	type = key & MLNX_UMAP_RSRC_TYPE_MASK;
1089 	key  = key >> MLNX_UMAP_RSRC_TYPE_SHIFT;
1090 
1091 	/*
1092 	 * Extract the "size" of the mapping.  If this size is determined
1093 	 * to be zero, then it is an indication of a previously invalidated
1094 	 * mapping, and no CQ or QP resources should be affected.
1095 	 */
1096 	size = dvm_track->hdt_size;
1097 
1098 	/*
1099 	 * If only the "middle" portion of a given mapping is being unmapped,
1100 	 * then we are effectively creating one new piece of mapped memory.
1101 	 * (The original region is divided into three pieces, of which the
1102 	 * middle piece is being removed.  This leaves two pieces.)  Since we
1103 	 * started with one piece and now have two pieces, we need to increment
1104 	 * the counter in the "Hermon devmap tracking structure".
1105 	 *
1106 	 * If, however, the whole mapped region is being unmapped, then we
1107 	 * have started with one region which we are completely removing.
1108 	 * In this case, we need to decrement the counter in the "Hermon
1109 	 * devmap tracking structure".
1110 	 *
1111 	 * In each of the remaining cases, we will have started with one
1112 	 * mapped region and ended with one (different) region.  So no counter
1113 	 * modification is necessary.
1114 	 */
1115 	mutex_enter(&dvm_track->hdt_lock);
1116 	if ((new_dhp1 == NULL) && (new_dhp2 == NULL)) {
1117 		dvm_track->hdt_refcnt--;
1118 	} else if ((new_dhp1 != NULL) && (new_dhp2 != NULL)) {
1119 		dvm_track->hdt_refcnt++;
1120 	}
1121 	mutex_exit(&dvm_track->hdt_lock);
1122 
1123 	/*
1124 	 * For each of the cases where the region is being divided, we
1125 	 * need to pass back the "Hermon devmap tracking structure".  This way
1126 	 * we get it back when each of the remaining pieces is subsequently
1127 	 * unmapped.
1128 	 */
1129 	if (new_dhp1 != NULL) {
1130 		*pvtp1 = pvtp;
1131 	}
1132 	if (new_dhp2 != NULL) {
1133 		*pvtp2 = pvtp;
1134 	}
1135 
1136 	/*
1137 	 * If the "Hermon devmap tracking structure" is no longer being
1138 	 * referenced, then free it up.  Otherwise, return.
1139 	 */
1140 	if (dvm_track->hdt_refcnt == 0) {
1141 		mutex_destroy(&dvm_track->hdt_lock);
1142 		kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
1143 
1144 		/*
1145 		 * If the mapping was invalid (see explanation above), then
1146 		 * no further processing is necessary.
1147 		 */
1148 		if (size == 0) {
1149 			return;
1150 		}
1151 	} else {
1152 		return;
1153 	}
1154 
1155 	/*
1156 	 * Now that we can guarantee that the user memory is fully unmapped,
1157 	 * we can use the "key" and "type" values to try to find the entry
1158 	 * in the "userland resources database".  If it's found, then it
1159 	 * indicates that the queue memory (CQ or QP) has not yet been freed.
1160 	 * In this case, we update the corresponding CQ or QP handle to
1161 	 * indicate that the "devmap_devmem_remap()" call will be unnecessary.
1162 	 * If it's _not_ found, then it indicates that the CQ or QP memory
1163 	 * was, in fact, freed before it was unmapped (thus requiring a
1164 	 * previous invalidation by remapping - which will already have
1165 	 * been done in the free routine).
1166 	 */
1167 	status = hermon_umap_db_find(state->hs_instance, key, type, &value,
1168 	    0, NULL);
1169 	if (status == DDI_SUCCESS) {
1170 		/*
1171 		 * Depending on the type of the mapped resource (CQ or QP),
1172 		 * update handle to indicate that no invalidation remapping
1173 		 * will be necessary.
1174 		 */
1175 		if (type == MLNX_UMAP_CQMEM_RSRC) {
1176 
1177 			/* Use "value" to convert to CQ handle */
1178 			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
1179 			cq = (hermon_cqhdl_t)rsrcp->hr_addr;
1180 
1181 			/*
1182 			 * Invalidate the handle to the userland mapping.
1183 			 * Note: We must ensure that the mapping being
1184 			 * unmapped here is the current one for the CQ.  It
1185 			 * is possible that it might not be if this CQ has
1186 			 * been resized and the previous CQ memory has not
1187 			 * yet been unmapped.  But in that case, because of
1188 			 * the devmap_devmem_remap(), there is no longer any
1189 			 * association between the mapping and the real CQ
1190 			 * kernel memory.
1191 			 */
1192 			mutex_enter(&cq->cq_lock);
1193 			if (cq->cq_umap_dhp == dhp) {
1194 				cq->cq_umap_dhp = NULL;
1195 			}
1196 			mutex_exit(&cq->cq_lock);
1197 
1198 		} else if (type == MLNX_UMAP_QPMEM_RSRC) {
1199 
1200 			/* Use "value" to convert to QP handle */
1201 			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
1202 			qp = (hermon_qphdl_t)rsrcp->hr_addr;
1203 
1204 			/*
1205 			 * Invalidate the handle to the userland mapping.
1206 			 * Note: we ensure that the mapping being unmapped
1207 			 * here is the current one for the QP.  This is
1208 			 * more of a sanity check here since, unlike CQs
1209 			 * (above) we do not support resize of QPs.
1210 			 */
1211 			mutex_enter(&qp->qp_lock);
1212 			if (qp->qp_umap_dhp == dhp) {
1213 				qp->qp_umap_dhp = NULL;
1214 			}
1215 			mutex_exit(&qp->qp_lock);
1216 
1217 		} else if (type == MLNX_UMAP_SRQMEM_RSRC) {
1218 
1219 			/* Use "value" to convert to SRQ handle */
1220 			rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
1221 			srq = (hermon_srqhdl_t)rsrcp->hr_addr;
1222 
1223 			/*
1224 			 * Invalidate the handle to the userland mapping.
1225 			 * Note: we ensure that the mapping being unmapped
1226 			 * here is the current one for the SRQ.  This is
1227 			 * more of a sanity check here since, unlike CQs
1228 			 * (above) we do not support resize of SRQs.
1229 			 */
1230 			mutex_enter(&srq->srq_lock);
1231 			if (srq->srq_umap_dhp == dhp) {
1232 				srq->srq_umap_dhp = NULL;
1233 			}
1234 			mutex_exit(&srq->srq_lock);
1235 		}
1236 	}
1237 }
1238 
1239 
1240 /*
1241  * hermon_devmap_devmem_map()
1242  *    Context: Can be called from kernel context.
1243  */
1244 /* ARGSUSED */
1245 static int
1246 hermon_devmap_devmem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
1247     offset_t off, size_t len, void **pvtp)
1248 {
1249 	hermon_state_t		*state;
1250 	hermon_devmap_track_t	*dvm_track;
1251 	minor_t			instance;
1252 
1253 	/* Get Hermon softstate structure from instance */
1254 	instance = HERMON_DEV_INSTANCE(dev);
1255 	state = ddi_get_soft_state(hermon_statep, instance);
1256 	if (state == NULL) {
1257 		return (ENXIO);
1258 	}
1259 
1260 	/*
1261 	 * Allocate an entry to track the mapping and unmapping of this
1262 	 * resource.  Note:  We don't need to initialize the "refcnt" or
1263 	 * "offset" fields here, nor do we need to initialize the mutex
1264 	 * used with the "refcnt".  Since UAR pages are single pages, they
1265 	 * are not subject to "partial" unmappings.  This makes these other
1266 	 * fields unnecessary.
1267 	 */
1268 	dvm_track = (hermon_devmap_track_t *)kmem_zalloc(
1269 	    sizeof (hermon_devmap_track_t), KM_SLEEP);
1270 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1271 	dvm_track->hdt_state  = state;
1272 	dvm_track->hdt_size   = (uint_t)PAGESIZE;
1273 
1274 	/*
1275 	 * Pass the private "Hermon devmap tracking structure" back.  This
1276 	 * pointer will be returned in a subsequent "unmap" callback.
1277 	 */
1278 	*pvtp = dvm_track;
1279 
1280 	return (DDI_SUCCESS);
1281 }
1282 
1283 
1284 /*
1285  * hermon_devmap_devmem_dup()
1286  *    Context: Can be called from kernel context.
1287  */
1288 /* ARGSUSED */
1289 static int
1290 hermon_devmap_devmem_dup(devmap_cookie_t dhp, void *pvtp,
1291     devmap_cookie_t new_dhp, void **new_pvtp)
1292 {
1293 	hermon_state_t		*state;
1294 	hermon_devmap_track_t	*dvm_track;
1295 	uint_t			maxprot;
1296 	int			status;
1297 
1298 	/*
1299 	 * Extract the Hermon softstate pointer from "Hermon devmap tracking
1300 	 * structure" (in "pvtp").  Note: If the tracking structure is NULL
1301 	 * here, it means that this mapping was previously invalidated.
1302 	 * In this case, it can be safely ignored ("new_pvtp" set to NULL).
1303 	 */
1304 	dvm_track = (hermon_devmap_track_t *)pvtp;
1305 	if (dvm_track == NULL) {
1306 		*new_pvtp = NULL;
1307 		return (DDI_SUCCESS);
1308 	}
1309 
1310 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1311 	state = dvm_track->hdt_state;
1312 
1313 	/*
1314 	 * Since this devmap_dup() entry point is generally called
1315 	 * when a process does fork(2), it is incumbent upon the driver
1316 	 * to ensure that the child does not inherit a valid copy of
1317 	 * the parent's resource.  This is accomplished by using
1318 	 * devmap_devmem_remap() to invalidate the child's mapping to the
1319 	 * kernel memory.
1320 	 */
1321 	maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
1322 	status = devmap_devmem_remap(new_dhp, state->hs_dip, 0, 0,
1323 	    dvm_track->hdt_size, maxprot, DEVMAP_MAPPING_INVALID, NULL);
1324 	if (status != DDI_SUCCESS) {
1325 		HERMON_WARNING(state, "failed in hermon_devmap_devmem_dup()");
1326 		return (status);
1327 	}
1328 
1329 	/*
1330 	 * Since the region is invalid, there is no need for us to
1331 	 * allocate and continue to track an additional "Hermon devmap
1332 	 * tracking structure".  Instead we return NULL here, which is an
1333 	 * indication to the devmap_unmap() entry point that this entry
1334 	 * can be safely ignored.
1335 	 */
1336 	*new_pvtp = NULL;
1337 
1338 	return (DDI_SUCCESS);
1339 }
1340 
1341 
1342 /*
1343  * hermon_devmap_devmem_unmap()
1344  *    Context: Can be called from kernel context.
1345  */
1346 /* ARGSUSED */
1347 static void
1348 hermon_devmap_devmem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
1349     size_t len, devmap_cookie_t new_dhp1, void **pvtp1,
1350     devmap_cookie_t new_dhp2, void **pvtp2)
1351 {
1352 	hermon_devmap_track_t	*dvm_track;
1353 
1354 	/*
1355 	 * Free up the "Hermon devmap tracking structure" (in "pvtp").
1356 	 * There cannot be "partial" unmappings here because all UAR pages
1357 	 * are single pages.  Note: If the tracking structure is NULL here,
1358 	 * it means that this mapping was previously invalidated.  In
1359 	 * this case, it can be safely ignored.
1360 	 */
1361 	dvm_track = (hermon_devmap_track_t *)pvtp;
1362 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dvm_track))
1363 	if (dvm_track == NULL) {
1364 		return;
1365 	}
1366 
1367 	kmem_free(dvm_track, sizeof (hermon_devmap_track_t));
1368 }
1369 
1370 
1371 /*
1372  * hermon_umap_ci_data_in()
1373  *    Context: Can be called from user or kernel context.
1374  */
1375 /* ARGSUSED */
1376 ibt_status_t
1377 hermon_umap_ci_data_in(hermon_state_t *state, ibt_ci_data_flags_t flags,
1378     ibt_object_type_t object, void *hdl, void *data_p, size_t data_sz)
1379 {
1380 	int	status;
1381 
1382 	/*
1383 	 * Depending on the type of object about which additional information
1384 	 * is being provided (currently only MR is supported), we call the
1385 	 * appropriate resource-specific function.
1386 	 */
1387 	switch (object) {
1388 	case IBT_HDL_MR:
1389 		status = hermon_umap_mr_data_in((hermon_mrhdl_t)hdl,
1390 		    (ibt_mr_data_in_t *)data_p, data_sz);
1391 		if (status != DDI_SUCCESS) {
1392 			return (status);
1393 		}
1394 		break;
1395 
1396 	/*
1397 	 * For other possible valid IBT types, we return IBT_NOT_SUPPORTED,
1398 	 * since the Hermon driver does not support these.
1399 	 */
1400 	case IBT_HDL_HCA:
1401 	case IBT_HDL_QP:
1402 	case IBT_HDL_CQ:
1403 	case IBT_HDL_PD:
1404 	case IBT_HDL_MW:
1405 	case IBT_HDL_AH:
1406 	case IBT_HDL_SCHED:
1407 	case IBT_HDL_EEC:
1408 	case IBT_HDL_RDD:
1409 	case IBT_HDL_SRQ:
1410 		return (IBT_NOT_SUPPORTED);
1411 
1412 	/*
1413 	 * Any other types are invalid.
1414 	 */
1415 	default:
1416 		return (IBT_INVALID_PARAM);
1417 	}
1418 
1419 	return (DDI_SUCCESS);
1420 }
1421 
1422 
1423 /*
1424  * hermon_umap_mr_data_in()
1425  *    Context: Can be called from user or kernel context.
1426  */
1427 static ibt_status_t
1428 hermon_umap_mr_data_in(hermon_mrhdl_t mr, ibt_mr_data_in_t *data,
1429     size_t data_sz)
1430 {
1431 	if (data->mr_rev != IBT_MR_DATA_IN_IF_VERSION) {
1432 		return (IBT_NOT_SUPPORTED);
1433 	}
1434 
1435 	/* Check for valid MR handle pointer */
1436 	if (mr == NULL) {
1437 		return (IBT_MR_HDL_INVALID);
1438 	}
1439 
1440 	/* Check for valid MR input structure size */
1441 	if (data_sz < sizeof (ibt_mr_data_in_t)) {
1442 		return (IBT_INSUFF_RESOURCE);
1443 	}
1444 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1445 
1446 	/*
1447 	 * Ensure that the MR corresponds to userland memory and that it is
1448 	 * a currently valid memory region as well.
1449 	 */
1450 	mutex_enter(&mr->mr_lock);
1451 	if ((mr->mr_is_umem == 0) || (mr->mr_umemcookie == NULL)) {
1452 		mutex_exit(&mr->mr_lock);
1453 		return (IBT_MR_HDL_INVALID);
1454 	}
1455 
1456 	/*
1457 	 * If it has passed all the above checks, then extract the callback
1458 	 * function and argument from the input structure.  Copy them into
1459 	 * the MR handle.  This function will be called only if the memory
1460 	 * corresponding to the MR handle gets a umem_lockmemory() callback.
1461 	 */
1462 	mr->mr_umem_cbfunc = data->mr_func;
1463 	mr->mr_umem_cbarg1 = data->mr_arg1;
1464 	mr->mr_umem_cbarg2 = data->mr_arg2;
1465 	mutex_exit(&mr->mr_lock);
1466 
1467 	return (DDI_SUCCESS);
1468 }
1469 
1470 
1471 /*
1472  * hermon_umap_ci_data_out()
1473  *    Context: Can be called from user or kernel context.
1474  */
1475 /* ARGSUSED */
1476 ibt_status_t
1477 hermon_umap_ci_data_out(hermon_state_t *state, ibt_ci_data_flags_t flags,
1478     ibt_object_type_t object, void *hdl, void *data_p, size_t data_sz)
1479 {
1480 	int	status;
1481 
1482 	/*
1483 	 * Depending on the type of object about which additional information
1484 	 * is being requested (CQ or QP), we call the appropriate resource-
1485 	 * specific mapping function.
1486 	 */
1487 	switch (object) {
1488 	case IBT_HDL_CQ:
1489 		status = hermon_umap_cq_data_out((hermon_cqhdl_t)hdl,
1490 		    (mlnx_umap_cq_data_out_t *)data_p, data_sz);
1491 		if (status != DDI_SUCCESS) {
1492 			return (status);
1493 		}
1494 		break;
1495 
1496 	case IBT_HDL_QP:
1497 		status = hermon_umap_qp_data_out((hermon_qphdl_t)hdl,
1498 		    (mlnx_umap_qp_data_out_t *)data_p, data_sz);
1499 		if (status != DDI_SUCCESS) {
1500 			return (status);
1501 		}
1502 		break;
1503 
1504 	case IBT_HDL_SRQ:
1505 		status = hermon_umap_srq_data_out((hermon_srqhdl_t)hdl,
1506 		    (mlnx_umap_srq_data_out_t *)data_p, data_sz);
1507 		if (status != DDI_SUCCESS) {
1508 			return (status);
1509 		}
1510 		break;
1511 
1512 	case IBT_HDL_PD:
1513 		status = hermon_umap_pd_data_out((hermon_pdhdl_t)hdl,
1514 		    (mlnx_umap_pd_data_out_t *)data_p, data_sz);
1515 		if (status != DDI_SUCCESS) {
1516 			return (status);
1517 		}
1518 		break;
1519 
1520 	/*
1521 	 * For other possible valid IBT types, we return IBT_NOT_SUPPORTED,
1522 	 * since the Hermon driver does not support these.
1523 	 */
1524 	case IBT_HDL_HCA:
1525 	case IBT_HDL_MR:
1526 	case IBT_HDL_MW:
1527 	case IBT_HDL_AH:
1528 	case IBT_HDL_SCHED:
1529 	case IBT_HDL_EEC:
1530 	case IBT_HDL_RDD:
1531 		return (IBT_NOT_SUPPORTED);
1532 
1533 	/*
1534 	 * Any other types are invalid.
1535 	 */
1536 	default:
1537 		return (IBT_INVALID_PARAM);
1538 	}
1539 
1540 	return (DDI_SUCCESS);
1541 }
1542 
1543 
1544 /*
1545  * hermon_umap_cq_data_out()
1546  *    Context: Can be called from user or kernel context.
1547  */
1548 static ibt_status_t
1549 hermon_umap_cq_data_out(hermon_cqhdl_t cq, mlnx_umap_cq_data_out_t *data,
1550     size_t data_sz)
1551 {
1552 	/* Check for valid CQ handle pointer */
1553 	if (cq == NULL) {
1554 		return (IBT_CQ_HDL_INVALID);
1555 	}
1556 
1557 	/* Check for valid CQ mapping structure size */
1558 	if (data_sz < sizeof (mlnx_umap_cq_data_out_t)) {
1559 		return (IBT_INSUFF_RESOURCE);
1560 	}
1561 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1562 
1563 	/* Deal with cq_alloc() versus cq_resize() */
1564 	if (cq->cq_resize_hdl) {
1565 		data->mcq_maplen = cq->cq_resize_hdl->cq_cqinfo.qa_size;
1566 		data->mcq_numcqe = cq->cq_resize_hdl->cq_bufsz;
1567 	} else {
1568 		data->mcq_maplen = cq->cq_cqinfo.qa_size;
1569 		data->mcq_numcqe = cq->cq_bufsz;
1570 	}
1571 
1572 	/*
1573 	 * If it has passed all the above checks, then fill in all the useful
1574 	 * mapping information (including the mapping offset that will be
1575 	 * passed back to the devmap() interface during a subsequent mmap()
1576 	 * call).
1577 	 *
1578 	 * The "offset" for CQ mmap()'s looks like this:
1579 	 * +----------------------------------------+--------+--------------+
1580 	 * |		   CQ Number		    |  0x33  | Reserved (0) |
1581 	 * +----------------------------------------+--------+--------------+
1582 	 *	   (64 - 8 - PAGESHIFT) bits	    8 bits	PAGESHIFT bits
1583 	 *
1584 	 * This returns information about the mapping offset, the length of
1585 	 * the CQ memory, the CQ number (for use in later CQ doorbells), the
1586 	 * number of CQEs the CQ memory can hold, and the size of each CQE.
1587 	 */
1588 	data->mcq_rev			= MLNX_UMAP_IF_VERSION;
1589 	data->mcq_mapoffset		= ((((uint64_t)cq->cq_cqnum <<
1590 	    MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_CQMEM_RSRC) << PAGESHIFT);
1591 	data->mcq_cqnum			= cq->cq_cqnum;
1592 	data->mcq_cqesz			= sizeof (hermon_hw_cqe_t);
1593 
1594 	/* doorbell record fields */
1595 	data->mcq_polldbr_mapoffset	= cq->cq_dbr_mapoffset;
1596 	data->mcq_polldbr_maplen	= PAGESIZE;
1597 	data->mcq_polldbr_offset	= (uintptr_t)cq->cq_arm_ci_vdbr &
1598 	    PAGEOFFSET;
1599 	data->mcq_armdbr_mapoffset	= cq->cq_dbr_mapoffset;
1600 	data->mcq_armdbr_maplen		= PAGESIZE;
1601 	data->mcq_armdbr_offset		= data->mcq_polldbr_offset + 4;
1602 
1603 	return (DDI_SUCCESS);
1604 }
1605 
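/*
 * Illustrative userland usage (a sketch only; the actual consumer is the
 * Mellanox userland library, which is not shown here):
 *
 *	cq_buf = mmap(NULL, data.mcq_maplen, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, hca_fd, data.mcq_mapoffset);
 *
 * where "hca_fd" is assumed to be a descriptor from open(2) on the Hermon
 * device node.  The resulting devmap() call decodes the offset and maps
 * the CQ memory through hermon_umap_cqmem() above.
 */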
1606 
1607 /*
1608  * hermon_umap_qp_data_out()
1609  *    Context: Can be called from user or kernel context.
1610  */
1611 static ibt_status_t
1612 hermon_umap_qp_data_out(hermon_qphdl_t qp, mlnx_umap_qp_data_out_t *data,
1613     size_t data_sz)
1614 {
1615 	/* Check for valid QP handle pointer */
1616 	if (qp == NULL) {
1617 		return (IBT_QP_HDL_INVALID);
1618 	}
1619 
1620 	/* Check for valid QP mapping structure size */
1621 	if (data_sz < sizeof (mlnx_umap_qp_data_out_t)) {
1622 		return (IBT_INSUFF_RESOURCE);
1623 	}
1624 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1625 
1626 	/*
1627 	 * If it has passed all the checks, then fill in all the useful
1628 	 * mapping information (including the mapping offset that will be
1629 	 * passed back to the devmap() interface during a subsequent mmap()
1630 	 * call).
1631 	 *
1632 	 * The "offset" for QP mmap()'s looks like this:
1633 	 * +----------------------------------------+--------+--------------+
1634 	 * |		   QP Number		    |  0x44  | Reserved (0) |
1635 	 * +----------------------------------------+--------+--------------+
1636 	 *	   (64 - 8 - PAGESHIFT) bits	    8 bits	PAGESHIFT bits
1637 	 *
1638 	 * This returns information about the mapping offset, the length of
1639 	 * the QP memory, and the QP number (for use in later send and recv
1640 	 * doorbells).  It also returns the following information for both
1641 	 * the receive work queue and the send work queue, respectively:  the
1642 	 * offset (from the base mapped address) of the start of the given
1643 	 * work queue, the 64-bit IB virtual address that corresponds to
1644 	 * the base mapped address (needed for posting WQEs through the
1645 	 * QP doorbells), the number of WQEs the given work queue can hold,
1646 	 * and the size of each WQE for the given work queue.
1647 	 */
1648 	data->mqp_rev		= MLNX_UMAP_IF_VERSION;
1649 	data->mqp_mapoffset	= ((((uint64_t)qp->qp_qpnum <<
1650 	    MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_QPMEM_RSRC) << PAGESHIFT);
1651 	data->mqp_maplen	= qp->qp_wqinfo.qa_size;
1652 	data->mqp_qpnum		= qp->qp_qpnum;
1653 
1654 	/*
1655 	 * If this QP is associated with a shared receive queue (SRQ),
1656 	 * then return invalid RecvQ parameters.  Otherwise, return
1657 	 * the proper parameter values.
1658 	 */
1659 	if (qp->qp_srq_en == HERMON_QP_SRQ_ENABLED) {
1660 		data->mqp_rq_off	= (uint32_t)qp->qp_wqinfo.qa_size;
1661 		data->mqp_rq_desc_addr	= (uint32_t)qp->qp_wqinfo.qa_size;
1662 		data->mqp_rq_numwqe	= 0;
1663 		data->mqp_rq_wqesz	= 0;
1664 		data->mqp_rdbr_mapoffset = 0;
1665 		data->mqp_rdbr_maplen	= 0;
1666 		data->mqp_rdbr_offset	= 0;
1667 	} else {
1668 		data->mqp_rq_off	= (uintptr_t)qp->qp_rq_buf -
1669 		    (uintptr_t)qp->qp_wqinfo.qa_buf_aligned;
1670 		data->mqp_rq_desc_addr	= (uint32_t)((uintptr_t)qp->qp_rq_buf -
1671 		    qp->qp_desc_off);
1672 		data->mqp_rq_numwqe	= qp->qp_rq_bufsz;
1673 		data->mqp_rq_wqesz	= (1 << qp->qp_rq_log_wqesz);
1674 
1675 		/* doorbell record fields */
1676 		data->mqp_rdbr_mapoffset = qp->qp_rdbr_mapoffset;
1677 		data->mqp_rdbr_maplen	= PAGESIZE;
1678 		data->mqp_rdbr_offset	= (uintptr_t)qp->qp_rq_vdbr &
1679 		    PAGEOFFSET;
1680 	}
1681 	data->mqp_sq_off		= (uintptr_t)qp->qp_sq_buf -
1682 	    (uintptr_t)qp->qp_wqinfo.qa_buf_aligned;
1683 	data->mqp_sq_desc_addr	= (uint32_t)((uintptr_t)qp->qp_sq_buf -
1684 	    qp->qp_desc_off);
1685 	data->mqp_sq_numwqe	= qp->qp_sq_bufsz;
1686 	data->mqp_sq_wqesz	= (1 << qp->qp_sq_log_wqesz);
1687 	data->mqp_sq_headroomwqes = qp->qp_sq_hdrmwqes;
1688 
1689 	/* doorbell record fields */
1690 	data->mqp_sdbr_mapoffset = 0;
1691 	data->mqp_sdbr_maplen	= 0;
1692 	data->mqp_sdbr_offset	= 0;
1693 
1694 	return (DDI_SUCCESS);
1695 }
1696 
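/*
 * Illustrative sketch (not part of the driver): after mmap()'ing the QP work
 * queue memory using "mqp_mapoffset" and "mqp_maplen" (in the same way as the
 * CQ example above), a userland library could locate the n-th WQE of each
 * work queue from the fields filled in here, roughly:
 *
 *	uint8_t *sq_wqe = qp_buf + data.mqp_sq_off + (n * data.mqp_sq_wqesz);
 *	uint8_t *rq_wqe = qp_buf + data.mqp_rq_off + (n * data.mqp_rq_wqesz);
 *
 * "qp_buf", "data", and "n" are hypothetical names used only for this sketch,
 * and the receive queue arithmetic applies only when the QP is not associated
 * with an SRQ (otherwise the RecvQ fields are intentionally invalid, as noted
 * above).
 */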
1697 
1698 /*
1699  * hermon_umap_srq_data_out()
1700  *    Context: Can be called from user or kernel context.
1701  */
1702 static ibt_status_t
1703 hermon_umap_srq_data_out(hermon_srqhdl_t srq, mlnx_umap_srq_data_out_t *data,
1704     size_t data_sz)
1705 {
1706 	/* Check for valid SRQ handle pointer */
1707 	if (srq == NULL) {
1708 		return (IBT_SRQ_HDL_INVALID);
1709 	}
1710 
1711 	/* Check for valid SRQ mapping structure size */
1712 	if (data_sz < sizeof (mlnx_umap_srq_data_out_t)) {
1713 		return (IBT_INSUFF_RESOURCE);
1714 	}
1715 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1716 
1717 	/*
1718 	 * If it has passed all the checks, then fill in all the useful
1719 	 * mapping information (including the mapping offset that will be
1720 	 * passed back to the devmap() interface during a subsequent mmap()
1721 	 * call).
1722 	 *
1723 	 * The "offset" for SRQ mmap()'s looks like this:
1724 	 * +----------------------------------------+--------+--------------+
1725 	 * |		   SRQ Number		    |  0x66  | Reserved (0) |
1726 	 * +----------------------------------------+--------+--------------+
1727 	 *	   (64 - 8 - PAGESHIFT) bits	    8 bits	PAGESHIFT bits
1728 	 *
1729 	 * This returns information about the mapping offset, the length of the
1730 	 * SRQ memory, and the SRQ number (for use in later doorbells).  It
1731 	 * also returns the following information for the
1732 	 * shared receive queue: the offset (from the base mapped address) of
1733 	 * the start of the given work queue, the 64-bit IB virtual address
1734 	 * that corresponds to the base mapped address (needed for posting WQEs
1735 	 * through the QP doorbells), the number of WQEs the given work queue
1736 	 * can hold, and the size of each WQE for the given work queue.
1737 	 */
1738 	data->msrq_rev		= MLNX_UMAP_IF_VERSION;
1739 	data->msrq_mapoffset	= ((((uint64_t)srq->srq_srqnum <<
1740 	    MLNX_UMAP_RSRC_TYPE_SHIFT) | MLNX_UMAP_SRQMEM_RSRC) << PAGESHIFT);
1741 	data->msrq_maplen	= srq->srq_wqinfo.qa_size;
1742 	data->msrq_srqnum	= srq->srq_srqnum;
1743 
1744 	data->msrq_desc_addr	= (uint32_t)((uintptr_t)srq->srq_wq_buf -
1745 	    srq->srq_desc_off);
1746 	data->msrq_numwqe	= srq->srq_wq_bufsz;
1747 	data->msrq_wqesz	= (1 << srq->srq_wq_log_wqesz);
1748 
1749 	/* doorbell record fields */
1750 	data->msrq_rdbr_mapoffset = srq->srq_rdbr_mapoffset;
1751 	data->msrq_rdbr_maplen	= PAGESIZE;
1752 	data->msrq_rdbr_offset	= (uintptr_t)srq->srq_wq_vdbr &
1753 	    PAGEOFFSET;
1754 
1755 	return (DDI_SUCCESS);
1756 }
1757 
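/*
 * Illustrative sketch (not part of the driver): the CQ, QP, and SRQ mapping
 * offsets constructed above all share the layout shown in the diagrams, so a
 * given 64-bit offset "off" could be unpacked with arithmetic along these
 * lines (the 0xFF mask reflects the 8-bit resource type field, assuming
 * MLNX_UMAP_RSRC_TYPE_SHIFT is that same width):
 *
 *	uint_t	 rsrc_type = (off >> PAGESHIFT) & 0xFF;
 *	uint64_t rsrc_num  = off >> (PAGESHIFT + MLNX_UMAP_RSRC_TYPE_SHIFT);
 *
 * For the SRQ case above, "rsrc_type" would come back as
 * MLNX_UMAP_SRQMEM_RSRC (0x66) and "rsrc_num" as the SRQ number.
 */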
1758 
1759 /*
1760  * hermon_umap_pd_data_out()
1761  *    Context: Can be called from user or kernel context.
1762  */
1763 static ibt_status_t
1764 hermon_umap_pd_data_out(hermon_pdhdl_t pd, mlnx_umap_pd_data_out_t *data,
1765     size_t data_sz)
1766 {
1767 	/* Check for valid PD handle pointer */
1768 	if (pd == NULL) {
1769 		return (IBT_PD_HDL_INVALID);
1770 	}
1771 
1772 	/* Check for valid PD mapping structure size */
1773 	if (data_sz < sizeof (mlnx_umap_pd_data_out_t)) {
1774 		return (IBT_INSUFF_RESOURCE);
1775 	}
1776 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*data))
1777 
1778 	/*
1779 	 * If it has passed all the checks, then fill in the PD number
1780 	 * (the index allocated from the PD table for this PD).
1781 	 */
1782 	data->mpd_rev		= MLNX_UMAP_IF_VERSION;
1783 	data->mpd_pdnum		= pd->pd_pdnum;
1784 
1785 	return (DDI_SUCCESS);
1786 }
1787 
1788 
1789 /*
1790  * hermon_umap_db_init()
1791  *    Context: Only called from attach() path context
1792  */
1793 void
1794 hermon_umap_db_init(void)
1795 {
1796 	/*
1797 	 * Initialize the lock used by the Hermon "userland resources
1798 	 * database".  This lock ensures atomic access when adding, removing,
1799 	 * and finding entries in the database.
1800 	 */
1801 	mutex_init(&hermon_userland_rsrc_db.hdl_umapdb_lock, NULL,
1802 	    MUTEX_DRIVER, NULL);
1803 
1804 	/*
1805 	 * Initialize the AVL tree used for the "userland resources
1806 	 * database".  Using an AVL tree here provides the ability to
1807 	 * scale the database size to large numbers of resources.  The
1808 	 * entries in the tree are "hermon_umap_db_entry_t" (see
1809 	 * hermon_umap.h).  The tree is searched with the help of the
1810 	 * hermon_umap_db_compare() routine.
1811 	 */
1812 	avl_create(&hermon_userland_rsrc_db.hdl_umapdb_avl,
1813 	    hermon_umap_db_compare, sizeof (hermon_umap_db_entry_t),
1814 	    offsetof(hermon_umap_db_entry_t, hdbe_avlnode));
1815 }
1816 
1817 
1818 /*
1819  * hermon_umap_db_fini()
1820  *    Context: Only called from attach() and/or detach() path contexts
1821  */
1822 void
1823 hermon_umap_db_fini(void)
1824 {
1825 	/* Destroy the AVL tree for the "userland resources database" */
1826 	avl_destroy(&hermon_userland_rsrc_db.hdl_umapdb_avl);
1827 
1828 	/* Destroy the lock for the "userland resources database" */
1829 	mutex_destroy(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1830 }
1831 
1832 
1833 /*
1834  * hermon_umap_db_alloc()
1835  *    Context: Can be called from user or kernel context.
1836  */
1837 hermon_umap_db_entry_t *
1838 hermon_umap_db_alloc(uint_t instance, uint64_t key, uint_t type, uint64_t value)
1839 {
1840 	hermon_umap_db_entry_t	*umapdb;
1841 
1842 	/* Allocate an entry to add to the "userland resources database" */
1843 	umapdb = kmem_zalloc(sizeof (hermon_umap_db_entry_t), KM_NOSLEEP);
1844 	if (umapdb == NULL) {
1845 		return (NULL);
1846 	}
1847 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*umapdb))
1848 
1849 	/* Fill in the fields in the database entry */
1850 	umapdb->hdbe_common.hdb_instance  = instance;
1851 	umapdb->hdbe_common.hdb_type	  = type;
1852 	umapdb->hdbe_common.hdb_key	  = key;
1853 	umapdb->hdbe_common.hdb_value	  = value;
1854 
1855 	return (umapdb);
1856 }
1857 
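/*
 * Illustrative sketch (not part of the driver): callers typically pair this
 * allocation with hermon_umap_db_add() and, when the resource is torn down,
 * with a lookup that also removes the entry so that it can be freed.  With
 * hypothetical "instance", "key", and "rsrcp" values, that looks roughly
 * like:
 *
 *	umapdb = hermon_umap_db_alloc(instance, key, MLNX_UMAP_CQMEM_RSRC,
 *	    (uint64_t)(uintptr_t)rsrcp);
 *	if (umapdb == NULL)
 *		return (DDI_FAILURE);
 *	hermon_umap_db_add(umapdb);
 *	...
 *	if (hermon_umap_db_find(instance, key, MLNX_UMAP_CQMEM_RSRC, &value,
 *	    HERMON_UMAP_DB_REMOVE, &umapdb) == DDI_SUCCESS)
 *		hermon_umap_db_free(umapdb);
 */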
1858 
1859 /*
1860  * hermon_umap_db_free()
1861  *    Context: Can be called from user or kernel context.
1862  */
1863 void
1864 hermon_umap_db_free(hermon_umap_db_entry_t *umapdb)
1865 {
1866 	/* Free the database entry */
1867 	kmem_free(umapdb, sizeof (hermon_umap_db_entry_t));
1868 }
1869 
1870 
1871 /*
1872  * hermon_umap_db_add()
1873  *    Context: Can be called from user or kernel context.
1874  */
1875 void
1876 hermon_umap_db_add(hermon_umap_db_entry_t *umapdb)
1877 {
1878 	mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1879 	hermon_umap_db_add_nolock(umapdb);
1880 	mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1881 }
1882 
1883 
1884 /*
1885  * hermon_umap_db_add_nolock()
1886  *    Context: Can be called from user or kernel context.
1887  */
1888 void
1889 hermon_umap_db_add_nolock(hermon_umap_db_entry_t *umapdb)
1890 {
1891 	hermon_umap_db_query_t	query;
1892 	avl_index_t		where;
1893 
1894 	ASSERT(MUTEX_HELD(&hermon_userland_rsrc_db.hdl_umapdb_lock));
1895 
1896 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*umapdb))
1897 
1898 	/*
1899 	 * Copy the common portion of the "to-be-added" database entry
1900 	 * into the "hermon_umap_db_query_t" structure.  We use this structure
1901 	 * (with no flags set) to find the appropriate location in the
1902 	 * "userland resources database" for the new entry to be added.
1903 	 *
1904 	 * Note: we expect that this entry should not be found in the
1905 	 * database (unless something bad has happened).
1906 	 */
1907 	query.hqdb_common = umapdb->hdbe_common;
1908 	query.hqdb_flags  = 0;
1909 	(void) avl_find(&hermon_userland_rsrc_db.hdl_umapdb_avl, &query,
1910 	    &where);
1911 
1912 	/*
1913 	 * Now, using the "where" field from the avl_find() operation
1914 	 * above, we will insert the new database entry ("umapdb").
1915 	 */
1916 	avl_insert(&hermon_userland_rsrc_db.hdl_umapdb_avl, umapdb,
1917 	    where);
1918 }
1919 
1920 
1921 /*
1922  * hermon_umap_db_find()
1923  *    Context: Can be called from user or kernel context.
1924  */
1925 int
1926 hermon_umap_db_find(uint_t instance, uint64_t key, uint_t type,
1927     uint64_t *value, uint_t flag, hermon_umap_db_entry_t **umapdb)
1928 {
1929 	int	status;
1930 
1931 	mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1932 	status = hermon_umap_db_find_nolock(instance, key, type, value, flag,
1933 	    umapdb);
1934 	mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
1935 
1936 	return (status);
1937 }
1938 
1939 
1940 /*
1941  * hermon_umap_db_find_nolock()
1942  *    Context: Can be called from user or kernel context.
1943  */
1944 int
1945 hermon_umap_db_find_nolock(uint_t instance, uint64_t key, uint_t type,
1946     uint64_t *value, uint_t flags, hermon_umap_db_entry_t **umapdb)
1947 {
1948 	hermon_umap_db_query_t	query;
1949 	hermon_umap_db_entry_t	*entry;
1950 	avl_index_t		where;
1951 
1952 	ASSERT(MUTEX_HELD(&hermon_userland_rsrc_db.hdl_umapdb_lock));
1953 
1954 	/*
1955 	 * Fill in key, type, instance, and flags values of the
1956 	 * hermon_umap_db_query_t in preparation for the database
1957 	 * lookup.
1958 	 */
1959 	query.hqdb_flags		= flags;
1960 	query.hqdb_common.hdb_key	= key;
1961 	query.hqdb_common.hdb_type	= type;
1962 	query.hqdb_common.hdb_instance	= instance;
1963 
1964 	/*
1965 	 * Perform the database query.  If no entry is found, then
1966 	 * return failure, else continue.
1967 	 */
1968 	entry = (hermon_umap_db_entry_t *)avl_find(
1969 	    &hermon_userland_rsrc_db.hdl_umapdb_avl, &query, &where);
1970 	if (entry == NULL) {
1971 		return (DDI_FAILURE);
1972 	}
1973 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*entry))
1974 
1975 	/*
1976 	 * If the flags argument specifies that the entry should
1977 	 * be removed if found, then call avl_remove() to remove
1978 	 * the entry from the database.
1979 	 */
1980 	if (flags & HERMON_UMAP_DB_REMOVE) {
1981 
1982 		avl_remove(&hermon_userland_rsrc_db.hdl_umapdb_avl, entry);
1983 
1984 		/*
1985 		 * The database entry is returned with the expectation
1986 		 * that the caller will use hermon_umap_db_free() to
1987 		 * free the entry's memory.  ASSERT that the "umapdb" pointer
1988 		 * is non-NULL; a NULL pointer should never be passed for
1989 		 * the HERMON_UMAP_DB_REMOVE case.
1990 		 */
1991 		ASSERT(umapdb != NULL);
1992 	}
1993 
1994 	/*
1995 	 * If the caller would like visibility to the database entry
1996 	 * (indicated through the use of a non-NULL "umapdb" argument),
1997 	 * then fill it in.
1998 	 */
1999 	if (umapdb != NULL) {
2000 		*umapdb = entry;
2001 	}
2002 
2003 	/* Extract value field from database entry and return success */
2004 	*value = entry->hdbe_common.hdb_value;
2005 
2006 	return (DDI_SUCCESS);
2007 }
2008 
2009 
2010 /*
2011  * hermon_umap_umemlock_cb()
2012  *    Context: Can be called from callback context.
2013  */
2014 void
2015 hermon_umap_umemlock_cb(ddi_umem_cookie_t *umem_cookie)
2016 {
2017 	hermon_umap_db_entry_t	*umapdb;
2018 	hermon_state_t		*state;
2019 	hermon_rsrc_t		*rsrcp;
2020 	hermon_mrhdl_t		mr;
2021 	uint64_t		value;
2022 	uint_t			instance;
2023 	int			status;
2024 	void			(*mr_callback)(void *, void *);
2025 	void			*mr_cbarg1, *mr_cbarg2;
2026 
2027 	/*
2028 	 * If this was userland memory, then we need to remove its entry
2029 	 * from the "userland resources database".  Note:  We use the
2030 	 * HERMON_UMAP_DB_IGNORE_INSTANCE flag here because we don't know
2031 	 * which instance was used when the entry was added (the instance
2032 	 * is recovered from the entry itself once the entry has been found).
2033 	 */
2034 	status = hermon_umap_db_find(0, (uint64_t)(uintptr_t)umem_cookie,
2035 	    MLNX_UMAP_MRMEM_RSRC, &value, (HERMON_UMAP_DB_REMOVE |
2036 	    HERMON_UMAP_DB_IGNORE_INSTANCE), &umapdb);
2037 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*umapdb))
2038 	if (status == DDI_SUCCESS) {
2039 		instance = umapdb->hdbe_common.hdb_instance;
2040 		state = ddi_get_soft_state(hermon_statep, instance);
2041 		if (state == NULL) {
2042 			cmn_err(CE_WARN, "Unable to match Hermon instance\n");
2043 			return;
2044 		}
2045 
2046 		/* Free the database entry */
2047 		hermon_umap_db_free(umapdb);
2048 
2049 		/* Use "value" to convert to an MR handle */
2050 		rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
2051 		mr = (hermon_mrhdl_t)rsrcp->hr_addr;
2052 
2053 		/*
2054 		 * If a callback has been provided, call it first.  This
2055 		 * callback is expected to do any cleanup necessary to
2056 		 * guarantee that the subsequent MR deregister (below)
2057 		 * will succeed.  Specifically, this means freeing up memory
2058 		 * windows which might have been associated with the MR.
2059 		 */
2060 		mutex_enter(&mr->mr_lock);
2061 		mr_callback = mr->mr_umem_cbfunc;
2062 		mr_cbarg1   = mr->mr_umem_cbarg1;
2063 		mr_cbarg2   = mr->mr_umem_cbarg2;
2064 		mutex_exit(&mr->mr_lock);
2065 		if (mr_callback != NULL) {
2066 			mr_callback(mr_cbarg1, mr_cbarg2);
2067 		}
2068 
2069 		/*
2070 		 * Then call hermon_mr_deregister() to release the resources
2071 		 * associated with the MR handle.  Note: Because this routine
2072 		 * will also check for whether the ddi_umem_cookie_t is in the
2073 		 * database, it will take responsibility for disabling the
2074 		 * memory region and calling ddi_umem_unlock().
2075 		 */
2076 		status = hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
2077 		    HERMON_SLEEP);
2078 		if (status != DDI_SUCCESS) {
2079 			HERMON_WARNING(state, "Unexpected failure in "
2080 			    "deregister from callback\n");
2081 		}
2082 	}
2083 }
2084 
2085 
2086 /*
2087  * hermon_umap_db_compare()
2088  *    Context: Can be called from user or kernel context.
2089  */
2090 static int
2091 hermon_umap_db_compare(const void *q, const void *e)
2092 {
2093 	hermon_umap_db_common_t	*entry_common, *query_common;
2094 	uint_t			query_flags;
2095 
2096 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*((hermon_umap_db_query_t *)q)))
2097 
2098 	entry_common = &((hermon_umap_db_entry_t *)e)->hdbe_common;
2099 	query_common = &((hermon_umap_db_query_t *)q)->hqdb_common;
2100 	query_flags  = ((hermon_umap_db_query_t *)q)->hqdb_flags;
2101 
2102 	/*
2103 	 * The first comparison is done on the "key" value in "query"
2104 	 * and "entry".  If they are not equal, then the appropriate
2105 	 * search direction is returned.  Else, we continue by
2106 	 * comparing "type".
2107 	 */
2108 	if (query_common->hdb_key < entry_common->hdb_key) {
2109 		return (-1);
2110 	} else if (query_common->hdb_key > entry_common->hdb_key) {
2111 		return (+1);
2112 	}
2113 
2114 	/*
2115 	 * If the search reaches this point, then "query" and "entry"
2116 	 * have equal key values.  So we continue by comparing their
2117 	 * "type" values.  Again, if they are not equal, then the
2118 	 * appropriate search direction is returned.  Else, we continue
2119 	 * by comparing "instance".
2120 	 */
2121 	if (query_common->hdb_type < entry_common->hdb_type) {
2122 		return (-1);
2123 	} else if (query_common->hdb_type > entry_common->hdb_type) {
2124 		return (+1);
2125 	}
2126 
2127 	/*
2128 	 * If the search reaches this point, then "query" and "entry"
2129 	 * have exactly the same key and type values.  Now we consult
2130 	 * the "flags" field in the query to determine whether the
2131 	 * "instance" is relevant to the search.  If the
2132 	 * HERMON_UMAP_DB_IGNORE_INSTANCE flag is set, then return
2133 	 * success (0) here.  Otherwise, continue the search by comparing
2134 	 * instance values and returning the appropriate search direction.
2135 	 */
2136 	if (query_flags & HERMON_UMAP_DB_IGNORE_INSTANCE) {
2137 		return (0);
2138 	}
2139 
2140 	/*
2141 	 * If the search has reached this point, then "query" and "entry"
2142 	 * can only be differentiated by their instance values.  If these
2143 	 * are not equal, then return the appropriate search direction.
2144 	 * Else, we return success (0).
2145 	 */
2146 	if (query_common->hdb_instance < entry_common->hdb_instance) {
2147 		return (-1);
2148 	} else if (query_common->hdb_instance > entry_common->hdb_instance) {
2149 		return (+1);
2150 	}
2151 
2152 	/* Everything matches... so return success */
2153 	return (0);
2154 }
2155 
2156 
2157 /*
2158  * hermon_umap_db_set_onclose_cb()
2159  *    Context: Can be called from user or kernel context.
2160  */
2161 int
2162 hermon_umap_db_set_onclose_cb(dev_t dev, uint64_t flag,
2163     int (*callback)(void *), void *arg)
2164 {
2165 	hermon_umap_db_priv_t	*priv;
2166 	hermon_umap_db_entry_t	*umapdb;
2167 	minor_t			instance;
2168 	uint64_t		value;
2169 	int			status;
2170 
2171 	instance = HERMON_DEV_INSTANCE(dev);
2172 	if (instance == (minor_t)-1) {
2173 		return (DDI_FAILURE);
2174 	}
2175 
2176 	if (flag != HERMON_ONCLOSE_FLASH_INPROGRESS) {
2177 		return (DDI_FAILURE);
2178 	}
2179 
2180 	/*
2181 	 * Grab the lock for the "userland resources database" and find
2182 	 * the entry corresponding to this minor number.  Once it's found,
2183 	 * allocate (if necessary) and add an entry (in the "hdb_priv"
2184 	 * field) to indicate that further processing may be needed during
2185 	 * Hermon's close() handling.
2186 	 */
2187 	mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2188 	status = hermon_umap_db_find_nolock(instance, dev,
2189 	    MLNX_UMAP_PID_RSRC, &value, 0, &umapdb);
2190 	if (status != DDI_SUCCESS) {
2191 		mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2192 		return (DDI_FAILURE);
2193 	}
2194 
2195 	priv = (hermon_umap_db_priv_t *)umapdb->hdbe_common.hdb_priv;
2196 	if (priv == NULL) {
2197 		priv = (hermon_umap_db_priv_t *)kmem_zalloc(
2198 		    sizeof (hermon_umap_db_priv_t), KM_NOSLEEP);
2199 		if (priv == NULL) {
2200 			mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2201 			return (DDI_FAILURE);
2202 		}
2203 	}
2204 
2205 	/*
2206 	 * Save away the callback and argument to be used during Hermon's
2207 	 * close() processing.
2208 	 */
2209 	priv->hdp_cb	= callback;
2210 	priv->hdp_arg	= arg;
2211 
2212 	umapdb->hdbe_common.hdb_priv = (void *)priv;
2213 	mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2214 
2215 	return (DDI_SUCCESS);
2216 }
2217 
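/*
 * Illustrative sketch (not part of the driver): a flash-update path that
 * needs cleanup when the owning process closes the device could register a
 * callback against its dev_t roughly as follows, where "my_flash_cleanup",
 * its "state" argument, and "status" are hypothetical names:
 *
 *	status = hermon_umap_db_set_onclose_cb(dev,
 *	    HERMON_ONCLOSE_FLASH_INPROGRESS, my_flash_cleanup, (void *)state);
 *	if (status != DDI_SUCCESS)
 *		return (EFAULT);
 *
 * The registration is dropped again with hermon_umap_db_clear_onclose_cb()
 * using the same dev_t and flag.
 */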
2218 
2219 /*
2220  * hermon_umap_db_clear_onclose_cb()
2221  *    Context: Can be called from user or kernel context.
2222  */
2223 int
2224 hermon_umap_db_clear_onclose_cb(dev_t dev, uint64_t flag)
2225 {
2226 	hermon_umap_db_priv_t	*priv;
2227 	hermon_umap_db_entry_t	*umapdb;
2228 	minor_t			instance;
2229 	uint64_t		value;
2230 	int			status;
2231 
2232 	instance = HERMON_DEV_INSTANCE(dev);
2233 	if (instance == (minor_t)-1) {
2234 		return (DDI_FAILURE);
2235 	}
2236 
2237 	if (flag != HERMON_ONCLOSE_FLASH_INPROGRESS) {
2238 		return (DDI_FAILURE);
2239 	}
2240 
2241 	/*
2242 	 * Grab the lock for the "userland resources database" and find
2243 	 * the entry corresponding to this minor number.  Once it's found,
2244 	 * remove the entry (in the "hdb_priv" field) that indicated the
2245 	 * need for further processing during Hermon's close().  Free the
2246 	 * entry, if appropriate.
2247 	 */
2248 	mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2249 	status = hermon_umap_db_find_nolock(instance, dev,
2250 	    MLNX_UMAP_PID_RSRC, &value, 0, &umapdb);
2251 	if (status != DDI_SUCCESS) {
2252 		mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2253 		return (DDI_FAILURE);
2254 	}
2255 
2256 	priv = (hermon_umap_db_priv_t *)umapdb->hdbe_common.hdb_priv;
2257 	if (priv != NULL) {
2258 		kmem_free(priv, sizeof (hermon_umap_db_priv_t));
2259 		priv = NULL;
2260 	}
2261 
2262 	umapdb->hdbe_common.hdb_priv = (void *)priv;
2263 	mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
2264 	return (DDI_SUCCESS);
2265 }
2266 
2267 
2268 /*
2269  * hermon_umap_db_handle_onclose_cb()
2270  *    Context: Can be called from user or kernel context.
2271  */
2272 int
2273 hermon_umap_db_handle_onclose_cb(hermon_umap_db_priv_t *priv)
2274 {
2275 	int	(*callback)(void *);
2276 
2277 	ASSERT(MUTEX_HELD(&hermon_userland_rsrc_db.hdl_umapdb_lock));
2278 
2279 	/*
2280 	 * Call the callback.
2281 	 *    Note: Currently there is only one callback (in "hdp_cb"), but
2282 	 *    in the future there may be more, depending on what other types
2283 	 *    of interaction there are between userland processes and the
2284 	 *    driver.
2285 	 */
2286 	callback = priv->hdp_cb;
2287 	return (callback(priv->hdp_arg));
2288 }
2289