/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/sysmacros.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <malloc.h>
#include <fcntl.h>
#include <dlfcn.h>
#include <sched.h>

#include <rsmapi.h>
#include <sys/rsm/rsmndi.h>
#include <rsmlib_in.h>
#include <sys/rsm/rsm.h>

/* lint -w2 */

extern rsm_node_id_t rsm_local_nodeid;
extern int loopback_getv(rsm_scat_gath_t *);
extern int loopback_putv(rsm_scat_gath_t *);

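/*
 * Library attributes advertised to the common librsm layer: the generic
 * put/get routines below require the segment to be mapped, while the
 * putv/getv routines take care of any mapping they need themselves.
 */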
static rsm_ndlib_attr_t _rsm_genlib_attr = {
	B_TRUE,		/* mapping needed for put/get */
	B_FALSE		/* mapping needed for putv/getv */
};

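/*
 * Import-side connect and disconnect: the generic library has no
 * per-connection state to set up or tear down, so these are no-op
 * stubs that simply return success.
 */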
static int
__rsm_import_connect(
    rsmapi_controller_handle_t controller, rsm_node_id_t node_id,
    rsm_memseg_id_t segment_id, rsm_permission_t perm,
    rsm_memseg_import_handle_t *im_memseg) {

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_connect: enter\n"));

	controller = controller;
	node_id = node_id;
	segment_id = segment_id;
	perm = perm;
	im_memseg = im_memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_connect: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_import_disconnect(rsm_memseg_import_handle_t im_memseg) {

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_disconnect: enter\n"));

	im_memseg = im_memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_disconnect: exit\n"));

	return (RSM_SUCCESS);
}

/*
 * XXX: one day we ought to rewrite this stuff based on 64byte atomic access.
 * We can have a new ops vector that makes that assumption.
 */

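/*
 * Import-side read access functions.  Each __rsm_getNxN routine copies
 * rep_cnt N-bit quantities from the mapped segment, starting at offset
 * off, into datap.  When the segment is in implicit barrier mode the
 * copy is bracketed by open and close barrier calls.  The swap argument
 * is unused by this generic implementation.
 */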
static int
__rsm_get8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint8_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint8_t *data_addr =
	    (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get8x8: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get8x8: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint16_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint16_t *data_addr =
	    /* LINTED */
	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get16x16: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get16x16: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint32_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint32_t *data_addr =
	    /* LINTED */
	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get32x32: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get32x32: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint64_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint64_t *data_addr =
	    /* LINTED */
	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get64x64: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		datap[i] = data_addr[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_import_get64x64: exit\n"));

	return (RSM_SUCCESS);
}

	/*
	 * import side memory segment operations (write access functions):
	 */

/*
 * XXX: Each one of the following cases ought to be a separate function loaded
 * into a segment access ops vector. We determine the correct function at
 * segment connect time. When a new controller is registered, we can decode
 * its direct_access_size attribute and load the correct function. For
 * loopback we need to create a special ops vector that bypasses all of
 * this stuff.
 *
 * XXX: We need to create a special interrupt queue for the library to handle
 * partial writes in the remote process.
 */
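/*
 * Each __rsm_putNxN routine copies rep_cnt N-bit quantities from datap
 * into the mapped segment at offset off, bracketed by open and close
 * barrier calls when the segment is in implicit barrier mode.  The swap
 * argument is unused by this generic implementation.
 */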
static int
__rsm_put8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint8_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint8_t *data_addr =
	    (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put8x8: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put8x8: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint16_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint16_t *data_addr =
	    /* LINTED */
	    (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put16x16: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put16x16: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint32_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint32_t *data_addr =
	    /* LINTED */
	    (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put32x32: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put32x32: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_put64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
    uint64_t *datap,
    ulong_t rep_cnt,
    boolean_t swap)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	uint64_t *data_addr =
	    /* LINTED */
	    (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
	uint_t i = 0;
	int	e;

	swap = swap;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put64x64: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	for (i = 0; i < rep_cnt; i++) {
		data_addr[i] = datap[i];
	}

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put64x64: exit\n"));

	return (RSM_SUCCESS);
}

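/*
 * __rsm_get copies length bytes from the mapped segment, starting at
 * offset, into dst_addr using bcopy, with the same implicit barrier
 * bracketing as the routines above.
 */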
static int
__rsm_get(rsm_memseg_import_handle_t im_memseg, off_t offset, void *dst_addr,
    size_t length)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	int		e;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	(void) bcopy(seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
	    dst_addr, length);

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get: exit\n"));

	return (RSM_SUCCESS);
}

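/*
 * __rsm_getv processes a scatter-gather read.  If the exporting node is
 * the local node, the segment is mapped implicitly (if it is not already
 * mapped), local memory handles in the I/O vector are replaced with
 * their virtual addresses, and the transfer is done by loopback_getv().
 * Otherwise, local handles are replaced with segment ids and the request
 * is handed to the Kernel Agent via RSM_IOCTL_GETV.
 */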
static int
__rsm_getv(rsm_scat_gath_t *sg_io)
{
	rsm_iovec_t 	*iovec = sg_io->iovec;
	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*l_iovec, *l_iovec_start;
	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
	rsmseg_handle_t *seg_hndl;
	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
	int e, i;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_getv: enter\n"));

	/*
	 * Use loopback for single node operations.
	 * replace local handles with virtual addresses
	 */

	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
		/*
		 * To use the loopback optimization map the segment
		 * here implicitly.
		 */
		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
			caddr_t	va;
			va = mmap(NULL, im_seg_hndl->rsmseg_size,
			    PROT_READ|PROT_WRITE,
			    MAP_SHARED|MAP_NORESERVE,
			    im_seg_hndl->rsmseg_fd, 0);

			if (va == MAP_FAILED) {
				DBPRINTF((RSM_LIBRARY, RSM_ERR,
				    "implicit map failed:%d\n", errno));
				if (errno == EINVAL)
					return (RSMERR_BAD_MEM_ALIGNMENT);
				else if (errno == ENOMEM || errno == ENXIO ||
				    errno == EOVERFLOW)
					return (RSMERR_BAD_LENGTH);
				else if (errno == EAGAIN)
					return (RSMERR_INSUFFICIENT_RESOURCES);
				else
					return (errno);
			}

			im_seg_hndl->rsmseg_vaddr = va;
			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
			im_seg_hndl->rsmseg_mapoffset = 0;
			im_seg_hndl->rsmseg_state = IMPORT_MAP;
			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
		}

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			l_iovec_start = l_iovec = malloc(iovec_size);
		else
			l_iovec_start = l_iovec = l_iovec_arr;

		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
		for (i = 0; i < sg_io->io_request_count; i++) {
			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
				/* Get the surrogate export segment handle */
				seg_hndl = (rsmseg_handle_t *)
				    l_iovec->local.handle;
				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
				l_iovec->io_type = RSM_VA_TYPE;
			}
			l_iovec++;
		}
		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
		e = loopback_getv(sg_io);
		sg_io->iovec = iovec;
		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			free(l_iovec_start);
		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
		    "__rsm_getv: exit\n"));
		return (e);
	}

	/* for the Kernel Agent, replace local handles with segment ids */
	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		ka_iovec_start = ka_iovec = malloc(iovec_size);
	else
		ka_iovec_start = ka_iovec = ka_iovec_arr;

	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
	for (i = 0; i < sg_io->io_request_count; i++) {
		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
		}
		ka_iovec++;
	}

	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_GETV, sg_io);
	sg_io->iovec = iovec;

	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		free(ka_iovec_start);

	if (e < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_GETV failed\n"));
		return (errno);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_getv: exit\n"));

	return (RSM_SUCCESS);
}


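/*
 * __rsm_put copies length bytes from src_addr into the mapped segment at
 * offset using bcopy, with the same implicit barrier bracketing as above.
 */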
static int
__rsm_put(rsm_memseg_import_handle_t im_memseg, off_t offset, void *src_addr,
    size_t length)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	int		e;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put: enter\n"));

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	bcopy(src_addr, seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
	    length);

	if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
		e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
		    (rsm_barrier_handle_t)seg->rsmseg_barrier);
		if (e != RSM_SUCCESS) {
			return (e);
		}
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_put: exit\n"));

	return (RSM_SUCCESS);
}

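/*
 * __rsm_putv is the write-side counterpart of __rsm_getv: loopback_putv()
 * handles single-node transfers, and RSM_IOCTL_PUTV hands the request to
 * the Kernel Agent for remote nodes.
 */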
static int
__rsm_putv(rsm_scat_gath_t *sg_io)
{
	rsm_iovec_t 	*iovec = sg_io->iovec;
	rsmka_iovec_t	ka_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*ka_iovec, *ka_iovec_start;
	rsmka_iovec_t	l_iovec_arr[RSM_MAX_IOVLEN];
	rsmka_iovec_t	*l_iovec, *l_iovec_start;
	rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
	rsmseg_handle_t *seg_hndl;
	int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
	int e, i;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_putv: enter\n"));

	/*
	 * Use loopback for single node operations.
	 * replace local handles with virtual addresses
	 */

	if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
		/*
		 * To use the loopback optimization map the segment
		 * here implicitly.
		 */
		if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
			caddr_t	va;
			va = mmap(NULL, im_seg_hndl->rsmseg_size,
			    PROT_READ|PROT_WRITE,
			    MAP_SHARED|MAP_NORESERVE,
			    im_seg_hndl->rsmseg_fd, 0);

			if (va == MAP_FAILED) {
				DBPRINTF((RSM_LIBRARY, RSM_ERR,
				    "implicit map failed:%d\n", errno));
				if (errno == EINVAL)
					return (RSMERR_BAD_MEM_ALIGNMENT);
				else if (errno == ENOMEM || errno == ENXIO ||
				    errno == EOVERFLOW)
					return (RSMERR_BAD_LENGTH);
				else if (errno == EAGAIN)
					return (RSMERR_INSUFFICIENT_RESOURCES);
				else
					return (errno);
			}
			im_seg_hndl->rsmseg_vaddr = va;
			im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
			im_seg_hndl->rsmseg_mapoffset = 0;
			im_seg_hndl->rsmseg_state = IMPORT_MAP;
			im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
		}

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			l_iovec_start = l_iovec = malloc(iovec_size);
		else
			l_iovec_start = l_iovec = l_iovec_arr;

		bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
		for (i = 0; i < sg_io->io_request_count; i++) {
			if (l_iovec->io_type == RSM_HANDLE_TYPE) {
				/* Get the surrogate export segment handle */
				seg_hndl = (rsmseg_handle_t *)
				    l_iovec->local.handle;
				l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
				l_iovec->io_type = RSM_VA_TYPE;
			}
			l_iovec++;
		}
		sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
		e = loopback_putv(sg_io);
		sg_io->iovec = iovec;

		if (sg_io->io_request_count > RSM_MAX_IOVLEN)
			free(l_iovec_start);

		DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
		    "__rsm_putv: exit\n"));

		return (e);
	}

	/* for the Kernel Agent, replace local handles with segment ids */
	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		ka_iovec_start = ka_iovec = malloc(iovec_size);
	else
		ka_iovec_start = ka_iovec = ka_iovec_arr;

	bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);

	for (i = 0; i < sg_io->io_request_count; i++) {
		if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
			seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
			ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
		}
		ka_iovec++;
	}

	sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
	e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_PUTV, sg_io);
	sg_io->iovec = iovec;

	if (sg_io->io_request_count > RSM_MAX_IOVLEN)
		free(ka_iovec_start);

	if (e < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_PUTV failed\n"));
		return (errno);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_putv: exit\n"));

	return (RSM_SUCCESS);
}

	/*
	 * import side memory segment operations (barriers):
	 */
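/*
 * The generic barrier implementation keeps a small rsm_barrier_t in the
 * barrier handle and maps the open, order, and close operations onto the
 * RSM_IOCTL_BAR_OPEN, RSM_IOCTL_BAR_ORDER, and RSM_IOCTL_BAR_CLOSE ioctls
 * on the segment's file descriptor.
 */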
static int
__rsm_memseg_import_init_barrier(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_type_t type,
    rsm_barrier_handle_t barrier)
{
	rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_init_barrier: enter\n"));

	type = type;

	if (!seg) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid segment handle\n"));
		return (RSMERR_BAD_SEG_HNDL);
	}
	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier handle\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	/* XXX: fix later. We only support span-of-node barriers */

	bar->rsmgenbar_data = (rsm_barrier_t *)malloc(sizeof (rsm_barrier_t));
	if (bar->rsmgenbar_data == NULL) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "not enough memory\n"));
		return (RSMERR_INSUFFICIENT_MEM);
	}
	bar->rsmgenbar_seg = seg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_init_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_open_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_open_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier pointer\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

/* lint -save -e718 -e746 */
	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd,
	    RSM_IOCTL_BAR_OPEN, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_BAR_OPEN failed\n"));
/* lint -restore */
		return (RSMERR_BARRIER_OPEN_FAILED);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_open_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_order_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_order_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}
	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_ORDER, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "RSM_IOCTL_BAR_ORDER failed\n"));
		return (RSMERR_BARRIER_FAILURE);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_order_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_close_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
	rsmseg_handle_t *seg;
	rsm_ioctlmsg_t msg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_close_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}
	if ((seg = bar->rsmgenbar_seg) == 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "uninitialized barrier\n"));
		return (RSMERR_BARRIER_UNINITIALIZED);
	}

	msg.bar = *(bar->rsmgenbar_data);
	if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_CLOSE, &msg) < 0) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    " RSM_IOCTL_BAR_CLOSE failed\n"));
		return (RSMERR_BARRIER_FAILURE);
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_close_barrier: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_destroy_barrier(rsm_barrier_handle_t barrier)
{
	rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_destroy_barrier: enter\n"));

	if (!bar) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "invalid barrier\n"));
		return (RSMERR_BAD_BARRIER_PTR);
	}

	free((void *) bar->rsmgenbar_data);

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_destroy_barrier: exit\n"));

	return (RSM_SUCCESS);
}

/* lint -w1 */
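/*
 * Barrier mode get/set: the generic library keeps no barrier mode state
 * of its own, so these stubs accept any mode and simply return success.
 */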
static int
__rsm_memseg_import_get_mode(rsm_memseg_import_handle_t im_memseg,
    rsm_barrier_mode_t *mode)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_get_mode: enter\n"));

	im_memseg = im_memseg; mode = mode;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_get_mode: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_memseg_import_set_mode(rsm_memseg_import_handle_t im_memseg,
				rsm_barrier_mode_t mode)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_set_mode: enter\n"));

	im_memseg = im_memseg; mode = mode;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_memseg_import_set_mode: exit\n"));

	return (RSM_SUCCESS);
}

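/*
 * __rsm_create_memory_handle wraps a caller's local memory in a surrogate
 * exported segment (created and published to the local node only) so that
 * it can be referenced in putv/getv I/O vectors; __rsm_free_memory_handle
 * destroys that surrogate segment.
 */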
static int
__rsm_create_memory_handle(rsmapi_controller_handle_t controller,
    rsm_localmemory_handle_t *local_hndl_p,
    caddr_t local_va, size_t len)
{
	rsm_memseg_export_handle_t memseg;
	rsmapi_access_entry_t	acl[1];
	rsm_memseg_id_t segid = 0;
	size_t size;
	int e;


	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_create_memory_handle: enter\n"));

	/*
	 * create a surrogate segment (local memory will be locked down).
	 */
	size = roundup(len, PAGESIZE);
	e = rsm_memseg_export_create(controller, &memseg,
	    (void *)local_va, size,
	    RSM_ALLOW_REBIND);
	if (e != RSM_SUCCESS) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "export create failed\n"));
		return (e);
	}

	/*
	 * Publish the segment to the local node only.  If the segment
	 * length is very large then don't publish to the adapter driver
	 * because that will consume too much DVMA space - this is indicated
	 * to the Kernel Agent using null permissions.  DVMA binding will
	 * be done when the RDMA is set up.
	 */
	acl[0].ae_node = rsm_local_nodeid;
	if (len > RSM_MAX_HANDLE_DVMA)
		acl[0].ae_permission = 0;
	else
		acl[0].ae_permission = RSM_PERM_RDWR;

	e = rsm_memseg_export_publish(memseg, &segid, acl, 1);
	if (e != RSM_SUCCESS) {
		DBPRINTF((RSM_LIBRARY, RSM_ERR,
		    "export publish failed\n"));
		rsm_memseg_export_destroy(memseg);
		return (e);
	}

	/* Use the surrogate seghandle as the local memory handle */
	*local_hndl_p = (rsm_localmemory_handle_t)memseg;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_create_memory_handle: exit\n"));

	return (e);
}

static int
__rsm_free_memory_handle(rsm_localmemory_handle_t local_handle)
{
	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_free_memory_handle: enter\n"));

	rsm_memseg_export_destroy((rsm_memseg_export_handle_t)local_handle);

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_free_memory_handle: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_get_lib_attr(rsm_ndlib_attr_t **libattrp)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get_lib_attr: enter\n"));

	*libattrp = &_rsm_genlib_attr;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_get_lib_attr: exit\n"));

	return (RSM_SUCCESS);
}

static int
__rsm_closedevice(rsmapi_controller_handle_t cntr_handle)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_closedevice: enter\n"));

	cntr_handle = cntr_handle;

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsm_closedevice: exit\n"));

	return (RSM_SUCCESS);
}

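/*
 * __rsmdefault_setops fills in any entries of a network driver library's
 * segment ops vector that were left NULL with the generic routines above.
 */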
void
__rsmdefault_setops(rsm_segops_t *segops)
{

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsmdefault_setops: enter\n"));

	if (segops->rsm_memseg_import_connect == NULL) {
		segops->rsm_memseg_import_connect = __rsm_import_connect;
	}
	if (segops->rsm_memseg_import_disconnect == NULL) {
		segops->rsm_memseg_import_disconnect = __rsm_import_disconnect;
	}

	if (segops->rsm_memseg_import_get8 == NULL) {
		segops->rsm_memseg_import_get8 = __rsm_get8x8;
	}
	if (segops->rsm_memseg_import_get16 == NULL) {
		segops->rsm_memseg_import_get16 = __rsm_get16x16;
	}
	if (segops->rsm_memseg_import_get32 == NULL) {
		segops->rsm_memseg_import_get32 = __rsm_get32x32;
	}
	if (segops->rsm_memseg_import_get64 == NULL) {
		segops->rsm_memseg_import_get64 = __rsm_get64x64;
	}
	if (segops->rsm_memseg_import_get == NULL) {
		segops->rsm_memseg_import_get = __rsm_get;
	}

	if (segops->rsm_memseg_import_put8 == NULL) {
		segops->rsm_memseg_import_put8 = __rsm_put8x8;
	}
	if (segops->rsm_memseg_import_put16 == NULL) {
		segops->rsm_memseg_import_put16 = __rsm_put16x16;
	}
	if (segops->rsm_memseg_import_put32 == NULL) {
		segops->rsm_memseg_import_put32 = __rsm_put32x32;
	}
	if (segops->rsm_memseg_import_put64 == NULL) {
		segops->rsm_memseg_import_put64 = __rsm_put64x64;
	}
	if (segops->rsm_memseg_import_put == NULL) {
		segops->rsm_memseg_import_put = __rsm_put;
	}

	if (segops->rsm_memseg_import_putv == NULL) {
		segops->rsm_memseg_import_putv = __rsm_putv;
	}

	if (segops->rsm_memseg_import_getv == NULL) {
		segops->rsm_memseg_import_getv = __rsm_getv;
	}

	if (segops->rsm_create_localmemory_handle == NULL) {
		segops->rsm_create_localmemory_handle =
		    __rsm_create_memory_handle;
	}

	if (segops->rsm_free_localmemory_handle == NULL) {
		segops->rsm_free_localmemory_handle =
		    __rsm_free_memory_handle;
	}

	/* XXX: Need to support barrier functions */
	if (segops->rsm_memseg_import_init_barrier == NULL) {
		segops->rsm_memseg_import_init_barrier =
		    __rsm_memseg_import_init_barrier;
	}
	if (segops->rsm_memseg_import_open_barrier == NULL) {
		segops->rsm_memseg_import_open_barrier =
		    __rsm_memseg_import_open_barrier;
	}
	if (segops->rsm_memseg_import_order_barrier == NULL) {
		segops->rsm_memseg_import_order_barrier =
		    __rsm_memseg_import_order_barrier;
	}
	if (segops->rsm_memseg_import_close_barrier == NULL) {
		segops->rsm_memseg_import_close_barrier =
		    __rsm_memseg_import_close_barrier;
	}
	if (segops->rsm_memseg_import_destroy_barrier == NULL) {
		segops->rsm_memseg_import_destroy_barrier =
		    __rsm_memseg_import_destroy_barrier;
	}

	if (segops->rsm_memseg_import_get_mode == NULL) {
		segops->rsm_memseg_import_get_mode =
		    __rsm_memseg_import_get_mode;
	}
	if (segops->rsm_memseg_import_set_mode == NULL) {
		segops->rsm_memseg_import_set_mode =
		    __rsm_memseg_import_set_mode;
	}

	if (segops->rsm_get_lib_attr == NULL) {
		segops->rsm_get_lib_attr =
		    __rsm_get_lib_attr;
	}

	if (segops->rsm_closedevice == NULL) {
		segops->rsm_closedevice =
		    __rsm_closedevice;
	}

	DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
	    "__rsmdefault_setops: exit\n"));

}