/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/exacct.h>
#include <sys/exacct_catalog.h>
#include <sys/exacct_impl.h>

#ifndef	_KERNEL
#include <limits.h>
#include <errno.h>
#include <poll.h>
#include <stdlib.h>
#include <strings.h>
#else
#include <sys/systm.h>
#endif

/*
 * extended accounting file core routines
 *
 *   Routines shared by libexacct and the kernel for the definition,
 *   construction and packing of extended accounting (exacct) records.
 *
 * Locking
 *   All routines in this file use ea_alloc(), which is a malloc() wrapper
 *   in userland and a kmem_alloc(..., KM_SLEEP) wrapper in the kernel.
 *   Accordingly, all routines require a context suitable for KM_SLEEP
 *   allocations.
 */

#define	DEFAULT_ENTRIES 4

/*
 * ea_alloc() and ea_free() provide a wrapper for the common
 * exacct code offering access to either the kmem allocator, or to libc's
 * malloc.
 */
void *
ea_alloc(size_t size)
{
#ifndef _KERNEL
	void *p;

	while ((p = malloc(size)) == NULL && errno == EAGAIN)
		(void) poll(NULL, 0, 10 * MILLISEC);
	if (p == NULL) {
		EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
	} else {
		EXACCT_SET_ERR(EXR_OK);
	}
	return (p);
#else
	return (kmem_alloc(size, KM_SLEEP));
#endif
}

#ifndef _KERNEL
/*ARGSUSED*/
#endif
void
ea_free(void *ptr, size_t size)
{
#ifndef _KERNEL
	free(ptr);
#else
	kmem_free(ptr, size);
#endif
}
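
/*
 * Illustrative only: a caller must hand the original allocation size back
 * to ea_free(), since the kernel variant is backed by kmem_free(), which
 * requires it; the userland free() simply ignores the size.  A minimal
 * sketch:
 *
 *	size_t sz = 16 * sizeof (uint64_t);
 *	uint64_t *tab = ea_alloc(sz);
 *	if (tab != NULL) {
 *		...
 *		ea_free(tab, sz);
 *	}
 */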

/*
 * ea_strdup() returns a pointer that, if non-NULL, must be freed using
 * ea_strfree() once its useful life ends.
 */
char *
ea_strdup(const char *ptr)
{
	/* Sets exacct_errno. */
	char *p = ea_alloc(strlen(ptr) + 1);
	if (p != NULL) {
		bcopy(ptr, p, strlen(ptr) + 1);
	}
	return (p);
}

/*
 * ea_strfree() frees a string allocated with ea_strdup().
 */
void
ea_strfree(char *ptr)
{
#ifndef _KERNEL
	free(ptr);
#else
	kmem_free(ptr, strlen(ptr) + 1);
#endif
}
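
/*
 * Usage sketch (illustrative): ea_strdup() reports failure through the
 * exacct error code, so callers should test for NULL before use:
 *
 *	char *tag = ea_strdup("deferred");
 *	if (tag == NULL)
 *		return (-1);	(in libexacct, ea_error() has the cause)
 *	...
 *	ea_strfree(tag);
 */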

/*
 * ea_cond_memcpy_at_offset() provides a simple conditional memcpy() that
 * allows us to write a pack routine that returns a valid buffer size,
 * copying only in the case that a non-NULL buffer is provided.
 */
static void
ea_cond_memcpy_at_offset(void *dst, size_t offset, size_t dstsize, void *src,
    size_t size)
{
	char *cdst = dst;
	char *csrc = src;

	if (dst == NULL || src == NULL || size == 0 || offset + size > dstsize)
		return;

	bcopy(csrc, cdst + offset, size);
}

/*
 * exacct_order{16,32,64}() are byte-swapping routines that place the native
 * data indicated by the input pointer in big-endian order.  Each exacct_order
 * function is its own inverse.
 */
#ifndef _LITTLE_ENDIAN
/*ARGSUSED*/
#endif /* _LITTLE_ENDIAN */
void
exacct_order16(uint16_t *in)
{
#ifdef _LITTLE_ENDIAN
	uint8_t s;
	union {
		uint16_t agg;
		uint8_t arr[2];
	} t;

	t.agg = *in;

	s = t.arr[0];
	t.arr[0] = t.arr[1];
	t.arr[1] = s;

	*in = t.agg;
#endif /* _LITTLE_ENDIAN */
}

#ifndef _LITTLE_ENDIAN
/*ARGSUSED*/
#endif /* _LITTLE_ENDIAN */
void
exacct_order32(uint32_t *in)
{
#ifdef _LITTLE_ENDIAN
	uint16_t s;
	union {
		uint32_t agg;
		uint16_t arr[2];
	} t;

	t.agg = *in;
	exacct_order16(&t.arr[0]);
	exacct_order16(&t.arr[1]);

	s = t.arr[0];
	t.arr[0] = t.arr[1];
	t.arr[1] = s;

	*in = t.agg;
#endif /* _LITTLE_ENDIAN */
}

#ifndef _LITTLE_ENDIAN
/*ARGSUSED*/
#endif /* _LITTLE_ENDIAN */
void
exacct_order64(uint64_t *in)
{
#ifdef _LITTLE_ENDIAN
	uint32_t s;
	union {
		uint64_t agg;
		uint32_t arr[2];
	} t;

	t.agg = *in;
	exacct_order32(&t.arr[0]);
	exacct_order32(&t.arr[1]);

	s = t.arr[0];
	t.arr[0] = t.arr[1];
	t.arr[1] = s;

	*in = t.agg;
#endif /* _LITTLE_ENDIAN */
}
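
/*
 * Since each exacct_order function is its own inverse, the pack code below
 * swaps a value into big-endian order, copies it out, and then swaps it
 * back so the caller's object is left unmodified, e.g.:
 *
 *	uint32_t v = 0x12345678;
 *	exacct_order32(&v);	(v is now big-endian on all platforms)
 *	exacct_order32(&v);	(v == 0x12345678 again)
 */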

int
ea_match_object_catalog(ea_object_t *obj, ea_catalog_t catmask)
{
	ea_catalog_t catval = obj->eo_catalog;

#define	EM_MATCH(v, m, M)	((m & M) == 0 || (v & M) == (m & M))
	return (EM_MATCH(catval, catmask, EXT_TYPE_MASK) &&
	    EM_MATCH(catval, catmask, EXC_CATALOG_MASK) &&
	    EM_MATCH(catval, catmask, EXD_DATA_MASK));
#undef EM_MATCH
}
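
/*
 * A zero field in catmask acts as a wildcard for that field.  For example
 * (assuming the standard tags from sys/exacct_catalog.h), a mask of just
 * EXT_UINT32 matches every 32-bit item, while a fully specified mask
 * matches only one kind of item:
 *
 *	if (ea_match_object_catalog(obj,
 *	    EXT_UINT32 | EXC_DEFAULT | EXD_PROC_PID))
 *		...
 */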

int
ea_set_item(ea_object_t *obj, ea_catalog_t tag,
    const void *value, size_t valsize)
{
	ea_item_t *item = &obj->eo_item;

	if ((tag & EXT_TYPE_MASK) == EXT_GROUP) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	bzero(obj, sizeof (ea_object_t));
	obj->eo_type = EO_ITEM;
	obj->eo_catalog = tag;

	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_UINT8:
		item->ei_u.ei_u_uint8 = *(uint8_t *)value;
		item->ei_size = sizeof (uint8_t);
		break;
	case EXT_UINT16:
		item->ei_u.ei_u_uint16 = *(uint16_t *)value;
		item->ei_size = sizeof (uint16_t);
		break;
	case EXT_UINT32:
		item->ei_u.ei_u_uint32 = *(uint32_t *)value;
		item->ei_size = sizeof (uint32_t);
		break;
	case EXT_UINT64:
		item->ei_u.ei_u_uint64 = *(uint64_t *)value;
		item->ei_size = sizeof (uint64_t);
		break;
	case EXT_DOUBLE:
		item->ei_u.ei_u_double = *(double *)value;
		item->ei_size = sizeof (double);
		break;
	case EXT_STRING:
		if ((item->ei_string = ea_strdup((char *)value)) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		item->ei_size = strlen(item->ei_string) + 1;
		break;
	case EXT_EXACCT_OBJECT:
		if ((item->ei_object = ea_alloc(valsize)) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bcopy(value, item->ei_object, valsize);
		item->ei_size = valsize;
		break;
	case EXT_RAW:
		if ((item->ei_raw = ea_alloc(valsize)) == NULL) {
			/* exacct_errno set above. */
			return (-1);
		}
		bcopy(value, item->ei_raw, valsize);
		item->ei_size = valsize;
		break;
	default:
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
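
/*
 * A minimal construction sketch (illustrative; tags assumed from
 * sys/exacct_catalog.h).  Note that valsize matters only for the
 * variable-length types: strings are measured with strlen(), while
 * EXT_RAW and EXT_EXACCT_OBJECT copy valsize bytes.
 *
 *	ea_object_t item;
 *	uint32_t pid = 100;
 *
 *	if (ea_set_item(&item, EXT_UINT32 | EXC_DEFAULT | EXD_PROC_PID,
 *	    &pid, sizeof (pid)) == -1)
 *		...
 */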

int
ea_set_group(ea_object_t *obj, ea_catalog_t tag)
{
	if ((tag & EXT_TYPE_MASK) != EXT_GROUP) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	bzero(obj, sizeof (ea_object_t));

	obj->eo_type = EO_GROUP;
	obj->eo_catalog = tag;
	obj->eo_u.eo_u_group.eg_nobjs = 0;
	obj->eo_u.eo_u_group.eg_objs = NULL;

	EXACCT_SET_ERR(EXR_OK);
	return (0);
}

void
ea_free_object(ea_object_t *obj, int flag)
{
	ea_object_t *next = obj;
	ea_object_t *save;

	while (next != NULL) {
		if (next->eo_type == EO_GROUP) {
			ea_free_object(next->eo_group.eg_objs, flag);
		} else if (next->eo_type == EO_ITEM) {
			switch (next->eo_catalog & EXT_TYPE_MASK) {
			case EXT_STRING:
				if (flag == EUP_ALLOC)
					ea_strfree(next->eo_item.ei_string);
				break;
			case EXT_RAW:
			case EXT_EXACCT_OBJECT:
				if (flag == EUP_ALLOC)
					ea_free(next->eo_item.ei_raw,
					    next->eo_item.ei_size);
				break;
			default:
				/* No action required for other types. */
				break;
			}
		}
		/* No action required for EO_NONE. */

		save = next;
		next = next->eo_next;
#ifdef _KERNEL
		kmem_cache_free(exacct_object_cache, save);
#else
		ea_free(save, sizeof (ea_object_t));
#endif /* _KERNEL */
	}
}

int
ea_free_item(ea_object_t *obj, int flag)
{
	if (obj->eo_type != EO_ITEM) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	switch (obj->eo_catalog & EXT_TYPE_MASK) {
	case EXT_STRING:
		if (flag == EUP_ALLOC)
			ea_strfree(obj->eo_item.ei_string);
		break;
	case EXT_RAW:
	case EXT_EXACCT_OBJECT:
		if (flag == EUP_ALLOC)
			ea_free(obj->eo_item.ei_raw, obj->eo_item.ei_size);
		break;
	default:
		/* No action required for other types. */
		break;
	}

	obj->eo_catalog = 0;
	obj->eo_type = EO_NONE;
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}

static void
ea_attach_object(ea_object_t **objp, ea_object_t *obj)
{
	ea_object_t *tp;

	tp = *objp;
	*objp = obj;
	obj->eo_next = tp;
}

int
ea_attach_to_object(ea_object_t *root, ea_object_t *obj)
{
	if (obj->eo_type == EO_GROUP || obj->eo_type == EO_ITEM) {
		ea_attach_object(&root->eo_next, obj);
		EXACCT_SET_ERR(EXR_OK);
		return (0);
	} else {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}
}
/*
 * ea_attach_to_group() takes a group object and an additional exacct object
 * and attaches the latter to the object list of the former.  The attached
 * exacct object can be the head of a chain of objects.  If group isn't
 * actually an object of type EO_GROUP, do nothing, so that we don't destroy
 * its contents.
 */
int
ea_attach_to_group(ea_object_t *group, ea_object_t *obj)
{
	uint_t n = 0;
	ea_object_t *next;
	ea_object_t **nextp;

	if (group->eo_type != EO_GROUP) {
		EXACCT_SET_ERR(EXR_INVALID_OBJ);
		return (-1);
	}

	for (next = obj; next != NULL; next = next->eo_next)
		n++;

	group->eo_group.eg_nobjs += n;

	for (nextp = &group->eo_group.eg_objs; *nextp != NULL;
	    nextp = &(*nextp)->eo_next)
		continue;

	ea_attach_object(nextp, obj);
	EXACCT_SET_ERR(EXR_OK);
	return (0);
}
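
/*
 * A hedged sketch of how these routines are typically combined to build a
 * record (illustrative; error handling elided, tags assumed from
 * sys/exacct_catalog.h):
 *
 *	ea_object_t grp, item;
 *	uint32_t pid = 100;
 *
 *	(void) ea_set_group(&grp,
 *	    EXT_GROUP | EXC_DEFAULT | EXD_GROUP_PROC);
 *	(void) ea_set_item(&item,
 *	    EXT_UINT32 | EXC_DEFAULT | EXD_PROC_PID, &pid, sizeof (pid));
 *	(void) ea_attach_to_group(&grp, &item);
 *
 * With heap-allocated objects, ea_free_object(o, EUP_ALLOC) releases an
 * entire chain, including nested groups.
 */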

/*
 * ea_pack_object takes the given exacct object series beginning with obj and
 * places it in buf.  Since ea_pack_object needs to be runnable in kernel
 * context, we construct it to use its own stack of state.  Specifically, we
 * store the locations of the sizes of open records (records whose construction
 * is in progress).  curr_frame is used to indicate the current frame.  Just
 * prior to decrementing curr_frame, we must ensure that the correct size for
 * that frame is placed in the given offset.
 */
struct es_frame {
	ea_object_t	*esf_obj;
	ea_size_t	esf_size;
	ea_size_t	esf_bksize;
	ea_size_t	esf_offset;
};

static void
incr_parent_frames(struct es_frame *base, int n, size_t amt)
{
	int i;

	for (i = 0; i <= n; i++) {
		base[i].esf_size += amt;
		base[i].esf_bksize += amt;
	}
}

size_t
ea_pack_object(ea_object_t *obj, void *buf, size_t bufsize)
{
	struct es_frame *estack;
	uint_t neframes;
	ea_object_t *curr_obj = obj;
	int curr_frame = 0;
	size_t curr_pos = 0;
	ea_size_t placeholder = 0;
	int end_of_group = 0;
	uint32_t gp_backskip = sizeof (ea_catalog_t) + sizeof (ea_size_t) +
	    sizeof (uint32_t) + sizeof (uint32_t);
	uint32_t lge_backskip;

	exacct_order32(&gp_backskip);
	estack = ea_alloc(sizeof (struct es_frame) * DEFAULT_ENTRIES);
	if (estack == NULL) {
		/* exacct_errno set above. */
		return ((size_t)-1);
	}
	bzero(estack, sizeof (struct es_frame) * DEFAULT_ENTRIES);
	neframes = DEFAULT_ENTRIES;

	/*
	 * 1.  Start with the current object.
	 */
	for (;;) {
		void *src;
		size_t size;

		/*
		 * 1a.  If at the bottom of the stack, we are done.
		 * If at the end of a group, place the correct size at the head
		 * of the chain, the correct backskip amount in the next
		 * position in the buffer, and retreat to the previous frame.
		 */
		if (end_of_group) {
			if (--curr_frame < 0) {
				break;
			}

			exacct_order64(&estack[curr_frame].esf_size);
			ea_cond_memcpy_at_offset(buf,
			    estack[curr_frame].esf_offset, bufsize,
			    &estack[curr_frame].esf_size, sizeof (ea_size_t));
			exacct_order64(&estack[curr_frame].esf_size);

			/*
			 * Note that the large backskip is only 32 bits,
			 * whereas an object can be up to 2^64 bytes long.
			 * If an object is greater than 2^32 bytes long, set
			 * the large backskip to 0.  This will prevent the
			 * file from being read backwards, by causing EOF to
			 * be returned when the big object is encountered;
			 * reading forwards is still fine, as it ignores the
			 * large backskip field.
			 */
			estack[curr_frame].esf_bksize += sizeof (uint32_t);

			lge_backskip =
			    estack[curr_frame].esf_bksize > UINT_MAX
			    ? 0 : (uint32_t)estack[curr_frame].esf_bksize;
			exacct_order32(&lge_backskip);
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &lge_backskip, sizeof (lge_backskip));

			curr_pos += sizeof (uint32_t);
			incr_parent_frames(estack, curr_frame,
			    sizeof (uint32_t));

			if ((curr_obj = estack[curr_frame].esf_obj) != NULL) {
				end_of_group = 0;
				estack[curr_frame].esf_obj = NULL;
				estack[curr_frame].esf_size = 0;
				estack[curr_frame].esf_bksize = 0;
			} else {
				continue;
			}
		}

		/*
		 * 2.  Write the catalog tag.
		 */
		exacct_order32(&curr_obj->eo_catalog);
		ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
		    &curr_obj->eo_catalog, sizeof (ea_catalog_t));
		exacct_order32(&curr_obj->eo_catalog);

		incr_parent_frames(estack, curr_frame, sizeof (ea_catalog_t));
		estack[curr_frame].esf_size -= sizeof (ea_catalog_t);
		curr_pos += sizeof (ea_catalog_t);
		estack[curr_frame].esf_offset = curr_pos;

		/*
		 * 2a. If this type is of variable size, reserve space for the
		 * size field.
		 */
		switch (curr_obj->eo_catalog & EXT_TYPE_MASK) {
		case EXT_GROUP:
		case EXT_STRING:
		case EXT_EXACCT_OBJECT:
		case EXT_RAW:
			exacct_order64(&placeholder);
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &placeholder, sizeof (ea_size_t));
			exacct_order64(&placeholder);

			incr_parent_frames(estack, curr_frame,
			    sizeof (ea_size_t));
			estack[curr_frame].esf_size -= sizeof (ea_size_t);
			curr_pos += sizeof (ea_size_t);
			break;
		default:
			break;
		}
		if (curr_obj->eo_type == EO_GROUP) {
			/*
			 * 3A.  If it's a group, put its next pointer, size,
			 * and size position on the stack, advance to a new
			 * frame, set the current object to eg_objs, and go
			 * to step 1.
			 */
			estack[curr_frame].esf_obj = curr_obj->eo_next;

			/*
			 * 3Aa. Insert the number of objects in the group.
			 */
			exacct_order32(&curr_obj->eo_group.eg_nobjs);
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &curr_obj->eo_group.eg_nobjs,
			    sizeof (uint32_t));
			exacct_order32(&curr_obj->eo_group.eg_nobjs);

			incr_parent_frames(estack, curr_frame,
			    sizeof (uint32_t));
			curr_pos += sizeof (uint32_t);

			/*
			 * 3Ab. Insert a backskip of the appropriate size.
			 */
			ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
			    &gp_backskip, sizeof (uint32_t));

			incr_parent_frames(estack, curr_frame,
			    sizeof (uint32_t));
			curr_pos += sizeof (uint32_t);

			curr_frame++;

			if (curr_frame >= neframes) {
				/*
				 * Expand the eframe stack to handle the
				 * requested depth.
				 */
				uint_t new_neframes = 2 * neframes;
				struct es_frame *new_estack =
				    ea_alloc(new_neframes *
				    sizeof (struct es_frame));
				if (new_estack == NULL) {
					ea_free(estack, neframes *
					    sizeof (struct es_frame));
					/* exacct_errno set above. */
					return ((size_t)-1);
				}

				bzero(new_estack, new_neframes *
				    sizeof (struct es_frame));
				bcopy(estack, new_estack, neframes *
				    sizeof (struct es_frame));

				ea_free(estack, neframes *
				    sizeof (struct es_frame));
				estack = new_estack;
				neframes = new_neframes;
			} else {
				bzero(&estack[curr_frame],
				    sizeof (struct es_frame));
			}

			estack[curr_frame].esf_offset = curr_pos;
			if ((curr_obj = curr_obj->eo_group.eg_objs) == NULL) {
				end_of_group = 1;
			}

			continue;
		}

		/*
		 * 3B. Otherwise we're considering an item: add its ei_size to
		 * all sizes on the stack, and copy its size into position.
		 */
		switch (curr_obj->eo_catalog & EXT_TYPE_MASK) {
		case EXT_UINT8:
			src = &curr_obj->eo_item.ei_uint8;
			size = sizeof (uint8_t);
			break;
		case EXT_UINT16:
			src = &curr_obj->eo_item.ei_uint16;
			size = sizeof (uint16_t);
			exacct_order16(src);
			break;
		case EXT_UINT32:
			src = &curr_obj->eo_item.ei_uint32;
			size = sizeof (uint32_t);
			exacct_order32(src);
			break;
		case EXT_UINT64:
			src = &curr_obj->eo_item.ei_uint64;
			size = sizeof (uint64_t);
			exacct_order64(src);
			break;
		case EXT_DOUBLE:
			src = &curr_obj->eo_item.ei_double;
			size = sizeof (double);
			exacct_order64((uint64_t *)src);
			break;
		case EXT_STRING:
			src = curr_obj->eo_item.ei_string;
			size = curr_obj->eo_item.ei_size;
			break;
		case EXT_EXACCT_OBJECT:
			src = curr_obj->eo_item.ei_object;
			size = curr_obj->eo_item.ei_size;
			break;
		case EXT_RAW:
			src = curr_obj->eo_item.ei_raw;
			size = curr_obj->eo_item.ei_size;
			break;
		case EXT_NONE:
		default:
			src = NULL;
			size = 0;
			break;
		}

		ea_cond_memcpy_at_offset(buf, curr_pos, bufsize, src, size);
		incr_parent_frames(estack, curr_frame, size);
		curr_pos += size;

		/*
		 * 4. Write the large backskip amount into the buffer.
		 * See above for note about why this may be set to 0.
		 */
		incr_parent_frames(estack, curr_frame, sizeof (uint32_t));

		lge_backskip = estack[curr_frame].esf_bksize > UINT_MAX
		    ? 0 : (uint32_t)estack[curr_frame].esf_bksize;
		exacct_order32(&lge_backskip);
		ea_cond_memcpy_at_offset(buf, curr_pos, bufsize,
		    &lge_backskip, sizeof (lge_backskip));

		curr_pos += sizeof (uint32_t);

		switch (curr_obj->eo_catalog & EXT_TYPE_MASK) {
		case EXT_RAW:
		case EXT_STRING:
		case EXT_EXACCT_OBJECT:
			exacct_order64(&estack[curr_frame].esf_size);
			ea_cond_memcpy_at_offset(buf,
			    estack[curr_frame].esf_offset, bufsize,
			    &estack[curr_frame].esf_size, sizeof (ea_size_t));
			exacct_order64(&estack[curr_frame].esf_size);
			break;
		case EXT_UINT16:
			exacct_order16(src);
			break;
		case EXT_UINT32:
			exacct_order32(src);
			break;
		case EXT_UINT64:
			exacct_order64(src);
			break;
		case EXT_DOUBLE:
			exacct_order64((uint64_t *)src);
			break;
		default:
			break;
		}

		/*
		 * 5.  If eo_next is NULL, we are at the end of a group.  If
		 * not, move on to the next item on the list.
		 */
		if (curr_obj->eo_next == NULL) {
			end_of_group = 1;
		} else {
			curr_obj = curr_obj->eo_next;
			estack[curr_frame].esf_obj = NULL;
			estack[curr_frame].esf_size = 0;
			estack[curr_frame].esf_bksize = 0;
		}
	}

	ea_free(estack, neframes * sizeof (struct es_frame));
	EXACCT_SET_ERR(EXR_OK);
	return (curr_pos);
}
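
/*
 * Because ea_cond_memcpy_at_offset() copies nothing when buf is NULL,
 * ea_pack_object() supports the customary two-pass idiom (a sketch; error
 * handling elided):
 *
 *	size_t sz = ea_pack_object(obj, NULL, 0);	(sizing pass)
 *	void *buf = ea_alloc(sz);
 *	(void) ea_pack_object(obj, buf, sz);		(packing pass)
 *
 * The sizing pass still walks the whole object chain, so it returns the
 * exact number of bytes the packed record will occupy; (size_t)-1
 * indicates failure.
 */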