/*
 *  GRUB  --  GRand Unified Bootloader
 *  Copyright (C) 1999,2000,2001,2002,2003,2004  Free Software Foundation, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The zfs plug-in routines for GRUB are:
 *
 * zfs_mount() - locates a valid uberblock of the root pool and reads
 *		in its MOS at the memory address MOS.
 *
 * zfs_open() - locates a plain file object by following the MOS
 *		and places its dnode at the memory address DNODE.
 *
 * zfs_read() - reads in the data blocks pointed to by the DNODE.
 *
 * ZFS_SCRATCH is used as a working area.
 *
 * (memory addr)   MOS      DNODE	ZFS_SCRATCH
 *		    |         |          |
 *	    +-------V---------V----------V---------------+
 *   memory |       | dnode   | dnode    |  scratch      |
 *	    |       | 512B    | 512B     |  area         |
 *	    +--------------------------------------------+
 */

#ifdef	FSYS_ZFS

#include "shared.h"
#include "filesys.h"
#include "fsys_zfs.h"

/* cache for a file block of the currently zfs_open()-ed file */
static void *file_buf = NULL;
static uint64_t file_start = 0;
static uint64_t file_end = 0;

/* cache for a dnode block */
static dnode_phys_t *dnode_buf = NULL;
static dnode_phys_t *dnode_mdn = NULL;
static uint64_t dnode_start = 0;
static uint64_t dnode_end = 0;

static uint64_t pool_guid = 0;
static uberblock_t current_uberblock;
static char *stackbase;

decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
{
	{"inherit", 0},			/* ZIO_COMPRESS_INHERIT */
	{"on", lzjb_decompress},	/* ZIO_COMPRESS_ON */
	{"off", 0},			/* ZIO_COMPRESS_OFF */
	{"lzjb", lzjb_decompress},	/* ZIO_COMPRESS_LZJB */
	{"empty", 0}			/* ZIO_COMPRESS_EMPTY */
};

static int zio_read_data(blkptr_t *bp, void *buf, char *stack);

/*
 * Our own version of bcmp().
 */
static int
zfs_bcmp(const void *s1, const void *s2, size_t n)
{
	const uchar_t *ps1 = s1;
	const uchar_t *ps2 = s2;

	if (s1 != s2 && n != 0) {
		do {
			if (*ps1++ != *ps2++)
				return (1);
		} while (--n != 0);
	}

	return (0);
}

/*
 * Our own version of log2().  Same thing as highbit()-1.
 */
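/*
 * For example, zfs_log2(512) == 9 and zfs_log2(0x4000) == 14; callers
 * below use it to turn block sizes into shift counts.  Note that
 * zfs_log2(0) == 0, so the result is floor(log2(num)) only for num >= 1.
 */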
static int
zfs_log2(uint64_t num)
{
	int i = 0;

	while (num > 1) {
		i++;
		num = num >> 1;
	}

	return (i);
}

/* Checksum Functions */
static void
zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}

/* Checksum Table and Values */
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
	{{NULL,			NULL},			0, 0,	"inherit"},
	{{NULL,			NULL},			0, 0,	"on"},
	{{zio_checksum_off,	zio_checksum_off},	0, 0,	"off"},
	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 1,	"label"},
	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 1,	"gang_header"},
	{{NULL,			NULL},			0, 0,	"zilog"},
	{{fletcher_2_native,	fletcher_2_byteswap},	0, 0,	"fletcher2"},
	{{fletcher_4_native,	fletcher_4_byteswap},	1, 0,	"fletcher4"},
	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 0,	"SHA256"},
	{{NULL,			NULL},			0, 0,	"zilog2"},
};

/*
 * zio_checksum_verify: Provides support for checksum verification.
 *
 * Fletcher2, Fletcher4, and SHA256 are supported.
 *
 * Return:
 *	-1 = Failure
 *	 0 = Success
 */
static int
zio_checksum_verify(blkptr_t *bp, char *data, int size)
{
	zio_cksum_t zc = bp->blk_cksum;
	uint32_t checksum = BP_GET_CHECKSUM(bp);
	int byteswap = BP_SHOULD_BYTESWAP(bp);
	zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
	zio_cksum_t actual_cksum, expected_cksum;

	/* byteswap is not supported */
	if (byteswap)
		return (-1);

	if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
		return (-1);

	if (ci->ci_eck) {
		expected_cksum = zec->zec_cksum;
		zec->zec_cksum = zc;
		ci->ci_func[0](data, size, &actual_cksum);
		zec->zec_cksum = expected_cksum;
		zc = expected_cksum;
	} else {
		ci->ci_func[byteswap](data, size, &actual_cksum);
	}

	if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
	    (actual_cksum.zc_word[1] - zc.zc_word[1]) |
	    (actual_cksum.zc_word[2] - zc.zc_word[2]) |
	    (actual_cksum.zc_word[3] - zc.zc_word[3]))
		return (-1);

	return (0);
}

/*
 * vdev_label_start returns the physical disk offset (in bytes) of
 * label "l".
 */
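/*
 * A vdev keeps VDEV_LABELS (four) copies of its label: two at the front
 * of the device and two at the end.  With the 256K vdev_label_t of the
 * ZFS on-disk format, the expression below yields byte offsets 0 and
 * 256K for labels 0 and 1, and psize - 512K and psize - 256K for labels
 * 2 and 3.
 */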
static uint64_t
vdev_label_start(uint64_t psize, int l)
{
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * vdev_uberblock_compare takes two uberblock structures and returns an integer
 * indicating the more recent of the two.
 *	Return Value = 1 if ub2 is more recent
 *	Return Value = -1 if ub1 is more recent
 * The most recent uberblock is determined using its transaction number and
 * timestamp.  The uberblock with the highest transaction number is
 * considered "newer".  If the transaction numbers of the two blocks match, the
 * timestamps are compared to determine the "newer" of the two.
 */
static int
vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
{
	if (ub1->ub_txg < ub2->ub_txg)
		return (-1);
	if (ub1->ub_txg > ub2->ub_txg)
		return (1);

	if (ub1->ub_timestamp < ub2->ub_timestamp)
		return (-1);
	if (ub1->ub_timestamp > ub2->ub_timestamp)
		return (1);

	return (0);
}

/*
 * Three pieces of information are needed to verify an uberblock: the magic
 * number, the version number, and the checksum.  All three are verified
 * below.
 *
 * Return:
 *     0 - Success
 *    -1 - Failure
 */
static int
uberblock_verify(uberblock_phys_t *ub, uint64_t offset)
{
	uberblock_t *uber = &ub->ubp_uberblock;
	blkptr_t bp;

	BP_ZERO(&bp);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
	ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);

	if (zio_checksum_verify(&bp, (char *)ub, UBERBLOCK_SIZE) != 0)
		return (-1);

	if (uber->ub_magic == UBERBLOCK_MAGIC &&
	    uber->ub_version > 0 && uber->ub_version <= SPA_VERSION)
		return (0);

	return (-1);
}

/*
 * Find the best uberblock.
 * Return:
 *    Success - Pointer to the best uberblock.
 *    Failure - NULL
 */
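/*
 * find_bestub() scans every slot in the label's uberblock ring and keeps
 * the newest uberblock that passes verification.  The physical byte
 * offset of each slot is handed to uberblock_verify(), which seeds the
 * expected label checksum with it, so an otherwise valid uberblock that
 * sits at the wrong location is rejected.
 */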
static uberblock_phys_t *
find_bestub(uberblock_phys_t *ub_array, uint64_t sector)
{
	uberblock_phys_t *ubbest = NULL;
	uint64_t offset;
	int i;

	for (i = 0; i < (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT); i++) {
		offset = (sector << SPA_MINBLOCKSHIFT) +
		    VDEV_UBERBLOCK_OFFSET(i);
		if (uberblock_verify(&ub_array[i], offset) == 0) {
			if (ubbest == NULL) {
				ubbest = &ub_array[i];
			} else if (vdev_uberblock_compare(
			    &(ub_array[i].ubp_uberblock),
			    &(ubbest->ubp_uberblock)) > 0) {
				ubbest = &ub_array[i];
			}
		}
	}

	return (ubbest);
}

/*
 * Read a block of data based on the gang block address dva,
 * and put its data in buf.
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
static int
zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
{
	zio_gbh_phys_t *zio_gb;
	uint64_t offset, sector;
	blkptr_t tmpbp;
	int i;

	zio_gb = (zio_gbh_phys_t *)stack;
	stack += SPA_GANGBLOCKSIZE;
	offset = DVA_GET_OFFSET(dva);
	sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);

	/* read in the gang block header */
	if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
		grub_printf("failed to read in a gang block header\n");
		return (1);
	}

	/* verify the gang block header's embedded (self) checksum */
	BP_ZERO(&tmpbp);
	BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
	BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
	ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
	    DVA_GET_OFFSET(dva), bp->blk_birth, 0);
	if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
		grub_printf("failed to checksum a gang block header\n");
		return (1);
	}

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		if (zio_gb->zg_blkptr[i].blk_birth == 0)
			continue;

		if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
			return (1);
		buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
	}

	return (0);
}

/*
 * Read in a block of raw data to buf.
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
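/*
 * A block pointer can carry up to SPA_DVAS_PER_BP copies (DVAs) of the
 * same data.  The loop below tries each non-empty DVA in turn and
 * returns as soon as one copy is read successfully; DVAs that point at
 * gang blocks are reassembled through zio_read_gang() instead of being
 * read directly.
 */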
static int
zio_read_data(blkptr_t *bp, void *buf, char *stack)
{
	int i, psize;

	psize = BP_GET_PSIZE(bp);

	/* pick a good dva from the block pointer */
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		uint64_t offset, sector;

		if (bp->blk_dva[i].dva_word[0] == 0 &&
		    bp->blk_dva[i].dva_word[1] == 0)
			continue;

		if (DVA_GET_GANG(&bp->blk_dva[i])) {
			if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
				return (0);
		} else {
			/* read in a data block */
			offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
			sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
			if (devread(sector, 0, psize, buf))
				return (0);
		}
	}

	return (1);
}

/*
 * Read in a block of data, verify its checksum, decompress if needed,
 * and put the uncompressed data in buf.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zio_read(blkptr_t *bp, void *buf, char *stack)
{
	int lsize, psize, comp;
	char *retbuf;

	comp = BP_GET_COMPRESS(bp);
	lsize = BP_GET_LSIZE(bp);
	psize = BP_GET_PSIZE(bp);

	if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
	    (comp != ZIO_COMPRESS_OFF &&
	    decomp_table[comp].decomp_func == NULL)) {
		grub_printf("compression algorithm not supported\n");
		return (ERR_FSYS_CORRUPT);
	}

	if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
		grub_printf("not enough memory allocated\n");
		return (ERR_WONT_FIT);
	}

	retbuf = buf;
	if (comp != ZIO_COMPRESS_OFF) {
		buf = stack;
		stack += psize;
	}

	if (zio_read_data(bp, buf, stack)) {
		grub_printf("zio_read_data failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	if (zio_checksum_verify(bp, buf, psize) != 0) {
		grub_printf("checksum verification failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	if (comp != ZIO_COMPRESS_OFF)
		decomp_table[comp].decomp_func(buf, retbuf, psize, lsize);

	return (0);
}

/*
 * Read the data block with the given block id from a dnode into buf,
 * walking down the dnode's indirect block tree.  The scratch area
 * "stack" is used to hold the block pointers and indirect blocks read
 * along the way.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
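/*
 * For example, with a 16K indirect block (dn_indblkshift == 14) and
 * 128-byte block pointers (SPA_BLKPTRSHIFT == 7), epbs below is 7, so
 * each indirect block holds 128 block pointers and the index used at a
 * given level is (blkid >> (7 * level)) & 127.
 */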
static int
dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
{
	int idx, level;
	blkptr_t *bp_array = dn->dn_blkptr;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	blkptr_t *bp, *tmpbuf;

	bp = (blkptr_t *)stack;
	stack += sizeof (blkptr_t);

	tmpbuf = (blkptr_t *)stack;
	stack += 1<<dn->dn_indblkshift;

	for (level = dn->dn_nlevels - 1; level >= 0; level--) {
		idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
		*bp = bp_array[idx];
		if (level == 0)
			tmpbuf = buf;
		if (BP_IS_HOLE(bp)) {
			grub_memset(buf, 0,
			    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			break;
		} else if (errnum = zio_read(bp, tmpbuf, stack)) {
			return (errnum);
		}

		bp_array = tmpbuf;
	}

	return (0);
}

/*
 * mzap_lookup: Looks up property described by "name" and returns the value
 * in "value".
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
mzap_lookup(mzap_phys_t *zapobj, int objsize, char *name,
	uint64_t *value)
{
	int i, chunks;
	mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;

	chunks = objsize / MZAP_ENT_LEN - 1;
	for (i = 0; i < chunks; i++) {
		if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
			*value = mzap_ent[i].mze_value;
			return (0);
		}
	}

	return (ERR_FSYS_CORRUPT);
}

static uint64_t
zap_hash(uint64_t salt, const char *name)
{
	static uint64_t table[256];
	const uint8_t *cp;
	uint8_t c;
	uint64_t crc = salt;

	if (table[128] == 0) {
		uint64_t *ct;
		int i, j;
		for (i = 0; i < 256; i++) {
			for (ct = table + i, *ct = i, j = 8; j > 0; j--)
				*ct = (*ct >> 1) ^ (-(*ct & 1) &
				    ZFS_CRC64_POLY);
		}
	}

	if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
		errnum = ERR_FSYS_CORRUPT;
		return (0);
	}

	for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
		crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];

	/*
	 * Only use 28 bits, since we need 4 bits in the cookie for the
	 * collision differentiator.  We MUST use the high bits, since
	 * those are the ones that we first pay attention to when
	 * choosing the bucket.
	 */
	crc &= ~((1ULL << (64 - 28)) - 1);

	return (crc);
}

/*
 * Only to be used on 8-bit arrays.
 * array_len is actual len in bytes (not encoded le_value_length).
 * buf is null-terminated.
 */
static int
zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
    int array_len, const char *buf)
{
	int bseen = 0;

	while (bseen < array_len) {
		struct zap_leaf_array *la =
		    &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
		int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);

		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (0);

		if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
			break;
		chunk = la->la_next;
		bseen += toread;
	}
	return (bseen == array_len);
}

/*
 * Given a zap_leaf_phys_t, walk through the zap leaf chunks to get the
 * value for the property "name".
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
    const char *name, uint64_t *value)
{
	uint16_t chunk;
	struct zap_leaf_entry *le;

	/* Verify that this is a valid leaf block */
	if (l->l_hdr.lh_block_type != ZBT_LEAF)
		return (ERR_FSYS_CORRUPT);
	if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
		return (ERR_FSYS_CORRUPT);

	for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
	    chunk != CHAIN_END; chunk = le->le_next) {

		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (ERR_FSYS_CORRUPT);

		le = ZAP_LEAF_ENTRY(l, blksft, chunk);

		/* Verify the chunk entry */
		if (le->le_type != ZAP_CHUNK_ENTRY)
			return (ERR_FSYS_CORRUPT);

		if (le->le_hash != h)
			continue;

		if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
		    le->le_name_length, name)) {

			struct zap_leaf_array *la;
			uint8_t *ip;

			if (le->le_int_size != 8 || le->le_value_length != 1)
				return (ERR_FSYS_CORRUPT);

			/* get the uint64_t property value */
			la = &ZAP_LEAF_CHUNK(l, blksft,
			    le->le_value_chunk).l_array;
			ip = la->la_array;

			*value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
			    (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
			    (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
			    (uint64_t)ip[6] << 8 | (uint64_t)ip[7];

			return (0);
		}
	}

	return (ERR_FSYS_CORRUPT);
}

/*
 * Fat ZAP lookup
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
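/*
 * Because external pointer tables are not supported here (zt_numblks
 * must be 0), the leaf pointer table is the one embedded in the second
 * half of the fat zap header block: a block of 1<<blksft bytes holds
 * 1<<(blksft-3) uint64_t slots, so the table starts at slot index
 * 1<<(blksft-3-1).  The top zt_shift bits of the hash select a slot,
 * which holds the block id of the leaf to search.
 */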
static int
fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
    char *name, uint64_t *value, char *stack)
{
	zap_leaf_phys_t *l;
	uint64_t hash, idx, blkid;
	int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);

	/* Verify that this is a fat zap header block */
	if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
	    zap->zap_flags != 0)
		return (ERR_FSYS_CORRUPT);

	hash = zap_hash(zap->zap_salt, name);
	if (errnum)
		return (errnum);

	/* get block id from index */
	if (zap->zap_ptrtbl.zt_numblks != 0) {
		/* external pointer tables not supported */
		return (ERR_FSYS_CORRUPT);
	}
	idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
	blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];

	/* Get the leaf block */
	l = (zap_leaf_phys_t *)stack;
	stack += 1<<blksft;
	if ((1<<blksft) < sizeof (zap_leaf_phys_t))
		return (ERR_FSYS_CORRUPT);
	if (errnum = dmu_read(zap_dnode, blkid, l, stack))
		return (errnum);

	return (zap_leaf_lookup(l, blksft, hash, name, value));
}

/*
 * Read in the data of a zap object and find the value for a matching
 * property name.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zap_lookup(dnode_phys_t *zap_dnode, char *name, uint64_t *val, char *stack)
{
	uint64_t block_type;
	int size;
	void *zapbuf;

	/* Read in the first block of the zap object data. */
	zapbuf = stack;
	size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	stack += size;
	if (errnum = dmu_read(zap_dnode, 0, zapbuf, stack))
		return (errnum);

	block_type = *((uint64_t *)zapbuf);

	if (block_type == ZBT_MICRO) {
		return (mzap_lookup(zapbuf, size, name, val));
	} else if (block_type == ZBT_HEADER) {
		/* this is a fat zap */
		return (fzap_lookup(zap_dnode, zapbuf, name,
		    val, stack));
	}

	return (ERR_FSYS_CORRUPT);
}

/*
 * Get the dnode of an object number from the metadnode of an object set.
 *
 * Input:
 *	mdn - metadnode to get the object dnode
 *	objnum - object number for the object dnode
 *	buf - data buffer that holds the returning dnode
 *	stack - scratch area
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
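/*
 * A whole block of dnodes is cached in dnode_buf so that repeated
 * lookups of objects from the same metadnode block skip the read.  For
 * example, with the usual 16K metadnode block (DNODE_BLOCK_SHIFT == 14)
 * and 512-byte dnodes (DNODE_SHIFT == 9), epbs is 5 and each cached
 * block covers 32 consecutive object numbers.
 */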
static int
dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
	char *stack)
{
	uint64_t blkid, blksz; /* the block id this object dnode is in */
	int epbs; /* shift of number of dnodes in a block */
	int idx; /* index within a block */
	dnode_phys_t *dnbuf;

	blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	epbs = zfs_log2(blksz) - DNODE_SHIFT;
	blkid = objnum >> epbs;
	idx = objnum & ((1<<epbs)-1);

	if (dnode_buf != NULL && dnode_mdn == mdn &&
	    objnum >= dnode_start && objnum < dnode_end) {
		grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
		VERIFY_DN_TYPE(buf, type);
		return (0);
	}

	if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
		dnbuf = dnode_buf;
		dnode_mdn = mdn;
		dnode_start = blkid << epbs;
		dnode_end = (blkid + 1) << epbs;
	} else {
		dnbuf = (dnode_phys_t *)stack;
		stack += blksz;
	}

	if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
		return (errnum);

	grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
	VERIFY_DN_TYPE(buf, type);

	return (0);
}

/*
 * Check if this is a special file that resides at the top
 * dataset of the pool. Currently these are the GRUB menu,
 * the boot signature and the boot signature backup.
 * str starts with '/'.
 */
static int
is_top_dataset_file(char *str)
{
	char *tptr;

	if ((tptr = grub_strstr(str, "menu.lst")) &&
	    (tptr[8] == '\0' || tptr[8] == ' ') &&
	    *(tptr-1) == '/')
		return (1);

	if (grub_strncmp(str, BOOTSIGN_DIR"/",
	    grub_strlen(BOOTSIGN_DIR) + 1) == 0)
		return (1);

	if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
		return (1);

	return (0);
}

/*
 * Get the file dnode for a given file name where mdn is the meta dnode
 * for this ZFS object set. When found, place the file dnode in dn.
 * The 'path' argument will be mangled.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
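/*
 * Each path component is looked up in the directory ZAP of the current
 * dnode.  The ZAP value of a directory entry packs the entry's file
 * type into its upper bits, so ZFS_DIRENT_OBJ() is applied below to
 * strip those bits and recover the plain object number.
 */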
static int
dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
    char *stack)
{
	uint64_t objnum, version;
	char *cname, ch;

	if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
	    dn, stack))
		return (errnum);

	if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
		return (errnum);
	if (version > ZPL_VERSION)
		return (-1);

	if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
		return (errnum);

	if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
	    dn, stack))
		return (errnum);

	/* skip leading slashes */
	while (*path == '/')
		path++;

	while (*path && !isspace(*path)) {

		/* get the next component name */
		cname = path;
		while (*path && !isspace(*path) && *path != '/')
			path++;
		ch = *path;
		*path = 0;   /* ensure null termination */

		if (errnum = zap_lookup(dn, cname, &objnum, stack))
			return (errnum);

		objnum = ZFS_DIRENT_OBJ(objnum);
		if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
			return (errnum);

		*path = ch;
		while (*path == '/')
			path++;
	}

	/* We found the dnode for this file. Verify if it is a plain file. */
	VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);

	return (0);
}

/*
 * Get the default 'bootfs' property value from the rootpool.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
{
	uint64_t objnum = 0;
	dnode_phys_t *dn = (dnode_phys_t *)stack;
	stack += DNODE_SIZE;

	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, dn, stack))
		return (errnum);

	/*
	 * find the object number for 'pool_props', and get the dnode
	 * of the 'pool_props'.
	 */
	if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
		return (ERR_FILESYSTEM_NOT_FOUND);

	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
		return (errnum);

	if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
		return (ERR_FILESYSTEM_NOT_FOUND);

	if (!objnum)
		return (ERR_FILESYSTEM_NOT_FOUND);

	*obj = objnum;
	return (0);
}

/*
 * Given a MOS metadnode, get the metadnode of a given filesystem name
 * (fsname), e.g. pool/rootfs, or a given object number (obj), e.g. the
 * object number of pool/rootfs.
 *
 * If no fsname and no obj are given, return the DSL_DIR metadnode.
 * If fsname is given, return its metadnode and its matching object number.
 * If only obj is given, return the metadnode for this object number.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
    dnode_phys_t *mdn, char *stack)
{
	uint64_t objnum, headobj;
	char *cname, ch;
	blkptr_t *bp;
	objset_phys_t *osp;
	int issnapshot = 0;
	char *snapname;

	if (fsname == NULL && obj) {
		headobj = *obj;
		goto skip;
	}

	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, mdn, stack))
		return (errnum);

	if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
	    stack))
		return (errnum);

	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
		return (errnum);

	if (fsname == NULL) {
		headobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
		goto skip;
	}

	/* skip the pool name */
	while (*fsname && !isspace(*fsname) && *fsname != '/')
		fsname++;

	while (*fsname && !isspace(*fsname)) {
		uint64_t childobj;

		while (*fsname == '/')
			fsname++;

		cname = fsname;
		while (*fsname && !isspace(*fsname) && *fsname != '/')
			fsname++;
		ch = *fsname;
		*fsname = 0;

		snapname = cname;
		while (*snapname && !isspace(*snapname) && *snapname != '@')
			snapname++;
		if (*snapname == '@') {
			issnapshot = 1;
			*snapname = 0;
		}
		childobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
		if (errnum = dnode_get(mosmdn, childobj,
		    DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
			return (errnum);

		if (zap_lookup(mdn, cname, &objnum, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);

		if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
		    mdn, stack))
			return (errnum);

		*fsname = ch;
		if (issnapshot)
			*snapname = '@';
	}
	headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
	if (obj)
		*obj = headobj;

skip:
	if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
		return (errnum);
	if (issnapshot) {
		uint64_t snapobj;

		snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
		    ds_snapnames_zapobj;

		if (errnum = dnode_get(mosmdn, snapobj,
		    DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
			return (errnum);
		if (zap_lookup(mdn, snapname + 1, &headobj, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);
		if (errnum = dnode_get(mosmdn, headobj,
		    DMU_OT_DSL_DATASET, mdn, stack))
			return (errnum);
		if (obj)
			*obj = headobj;
	}

	bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	if (errnum = zio_read(bp, osp, stack))
		return (errnum);

	grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);

	return (0);
}

/*
 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
 *
 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create):
 *
 *      encoding method/host endian     (4 bytes)
 *      nvl_version                     (4 bytes)
 *      nvl_nvflag                      (4 bytes)
 *	encoded nvpairs:
 *		encoded size of the nvpair      (4 bytes)
 *		decoded size of the nvpair      (4 bytes)
 *		name string size                (4 bytes)
 *		name string data                (sizeof(NV_ALIGN4(string)))
 *		data type                       (4 bytes)
 *		# of elements in the nvpair     (4 bytes)
 *		data
 *      2 zeros for the last nvpair
 *		(end of the entire list)	(8 bytes)
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
static int
nvlist_unpack(char *nvlist, char **out)
{
	/* Verify that the 1st and 2nd bytes in the nvlist are valid. */
	if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
		return (1);

	nvlist += 4;
	*out = nvlist;
	return (0);
}

static char *
nvlist_array(char *nvlist, int index)
{
	int i, encode_size;

	for (i = 0; i < index; i++) {
		/* skip the header, nvl_version, and nvl_nvflag */
		nvlist = nvlist + 4 * 2;

		while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
			nvlist += encode_size; /* go to the next nvpair */

		nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
	}

	return (nvlist);
}

static int
nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
    int *nelmp)
{
	int name_len, type, slen, encode_size;
	char *nvpair, *nvp_name, *strval = val;
	uint64_t *intval = val;

	/* skip the header, nvl_version, and nvl_nvflag */
	nvlist = nvlist + 4 * 2;

	/*
	 * Loop through the nvpair list.
	 * The XDR representation of an integer is in big-endian byte order.
	 */
	while (encode_size = BSWAP_32(*(uint32_t *)nvlist)) {

		nvpair = nvlist + 4 * 2; /* skip the encode/decode size */

		name_len = BSWAP_32(*(uint32_t *)nvpair);
		nvpair += 4;

		nvp_name = nvpair;
		nvpair = nvpair + ((name_len + 3) & ~3); /* align */

		type = BSWAP_32(*(uint32_t *)nvpair);
		nvpair += 4;

		if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
		    type == valtype) {
			int nelm;

			if ((nelm = BSWAP_32(*(uint32_t *)nvpair)) < 1)
				return (1);
			nvpair += 4;

			switch (valtype) {
			case DATA_TYPE_STRING:
				slen = BSWAP_32(*(uint32_t *)nvpair);
				nvpair += 4;
				grub_memmove(strval, nvpair, slen);
				strval[slen] = '\0';
				return (0);

			case DATA_TYPE_UINT64:
				*intval = BSWAP_64(*(uint64_t *)nvpair);
				return (0);

			case DATA_TYPE_NVLIST:
				*(void **)val = (void *)nvpair;
				return (0);

			case DATA_TYPE_NVLIST_ARRAY:
				*(void **)val = (void *)nvpair;
				if (nelmp)
					*nelmp = nelm;
				return (0);
			}
		}

		nvlist += encode_size; /* go to the next nvpair */
	}

	return (1);
}

/*
 * Check if this vdev is online and is in a good state.
 */
static int
vdev_validate(char *nv)
{
	uint64_t ival;

	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
	    DATA_TYPE_UINT64, NULL) == 0 ||
	    nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
	    DATA_TYPE_UINT64, NULL) == 0 ||
	    nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
	    DATA_TYPE_UINT64, NULL) == 0)
		return (ERR_DEV_VALUES);

	return (0);
}

/*
 * Get a valid vdev pathname/devid from the boot device.
 * The caller must have already allocated MAXPATHLEN bytes for bootpath
 * and devid.
 */
static int
vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
    int is_spare)
{
	char type[16];

	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
	    NULL))
		return (ERR_FSYS_CORRUPT);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		uint64_t guid;

		if (vdev_validate(nv) != 0)
			return (ERR_NO_BOOTPATH);

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
		    &guid, DATA_TYPE_UINT64, NULL) != 0)
			return (ERR_NO_BOOTPATH);

		if (guid != inguid)
			return (ERR_NO_BOOTPATH);

		/* for a spare vdev, pick the disk labeled with "is_spare" */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare, DATA_TYPE_UINT64, NULL);
			if (!spare)
				return (ERR_NO_BOOTPATH);
		}

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
		    bootpath, DATA_TYPE_STRING, NULL) != 0)
			bootpath[0] = '\0';

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
		    devid, DATA_TYPE_STRING, NULL) != 0)
			devid[0] = '\0';

		if (strlen(bootpath) >= MAXPATHLEN ||
		    strlen(devid) >= MAXPATHLEN)
			return (ERR_WONT_FIT);

		return (0);

	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		int nelm, i;
		char *child;

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
		    DATA_TYPE_NVLIST_ARRAY, &nelm))
			return (ERR_FSYS_CORRUPT);

		for (i = 0; i < nelm; i++) {
			char *child_i;

			child_i = nvlist_array(child, i);
			if (vdev_get_bootpath(child_i, inguid, devid,
			    bootpath, is_spare) == 0)
				return (0);
		}
	}

	return (ERR_NO_BOOTPATH);
}

/*
 * Check the disk label information and retrieve needed vdev name-value pairs.
 *
 * Return:
 *	0 - success
 *	ERR_* - failure
 */
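/*
 * Each on-disk vdev label is laid out as a skipped blank/boot area
 * (VDEV_SKIP_SIZE), followed by the 112K XDR-packed name-value pair
 * area (VDEV_PHYS_SIZE) examined here, followed by the 128K uberblock
 * ring (VDEV_UBERBLOCK_RING) that zfs_mount() scans.
 */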
int
check_pool_label(uint64_t sector, char *stack, char *outdevid,
    char *outpath, uint64_t *outguid)
{
	vdev_phys_t *vdev;
	uint64_t pool_state, txg = 0;
	char *nvlist, *nv;
	uint64_t diskguid;
	uint64_t version;

	sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);

	/* Read in the vdev name-value pair list (112K). */
	if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
		return (ERR_READ);

	vdev = (vdev_phys_t *)stack;
	stack += sizeof (vdev_phys_t);

	if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	if (pool_state == POOL_STATE_DESTROYED)
		return (ERR_FILESYSTEM_NOT_FOUND);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
	    current_rootpool, DATA_TYPE_STRING, NULL))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/* not an active device */
	if (txg == 0)
		return (ERR_NO_BOOTPATH);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, &version,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (version > SPA_VERSION)
		return (ERR_NEWER_VERSION);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
	    DATA_TYPE_NVLIST, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
		return (ERR_NO_BOOTPATH);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	return (0);
}

/*
 * zfs_mount() locates a valid uberblock of the root pool and reads in its
 * MOS at the memory address MOS.
 *
 * Return:
 *	1 - success
 *	0 - failure
 */
int
zfs_mount(void)
{
	char *stack;
	int label = 0;
	uberblock_phys_t *ub_array, *ubbest;
	objset_phys_t *osp;
	char tmp_bootpath[MAXNAMELEN];
	char tmp_devid[MAXNAMELEN];
	uint64_t tmp_guid;
	uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
	int err = errnum; /* preserve previous errnum state */

	/* if it's our first time here, zero the best uberblock out */
	if (best_drive == 0 && best_part == 0 && find_best_root) {
		grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
		pool_guid = 0;
	}

	stackbase = ZFS_SCRATCH;
	stack = stackbase;
	ub_array = (uberblock_phys_t *)stack;
	stack += VDEV_UBERBLOCK_RING;

	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));

	for (label = 0; label < VDEV_LABELS; label++) {

		/*
		 * Some El Torito stacks don't give us a size, so we end up
		 * setting the size to MAXUINT; furthermore, some of these
		 * devices stop working once a single read past the end has
		 * been issued.  Checking for a MAXUINT part_length and
		 * skipping the backup labels at the end of the
		 * slice/partition/device avoids breaking down on such
		 * devices.
		 */
		if (part_length == MAXUINT && label == 2)
			break;

		uint64_t sector = vdev_label_start(adjpl,
		    label) >> SPA_MINBLOCKSHIFT;

		/* Read in the uberblock ring (128K). */
		if (devread(sector +
		    ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >>
		    SPA_MINBLOCKSHIFT), 0, VDEV_UBERBLOCK_RING,
		    (char *)ub_array) == 0)
			continue;

		if ((ubbest = find_bestub(ub_array, sector)) != NULL &&
		    zio_read(&ubbest->ubp_uberblock.ub_rootbp, osp, stack)
		    == 0) {

			VERIFY_OS_TYPE(osp, DMU_OST_META);

			if (check_pool_label(sector, stack, tmp_devid,
			    tmp_bootpath, &tmp_guid))
				continue;
			if (pool_guid == 0)
				pool_guid = tmp_guid;

			if (find_best_root && ((pool_guid != tmp_guid) ||
			    vdev_uberblock_compare(&ubbest->ubp_uberblock,
			    &(current_uberblock)) <= 0))
				continue;

			/* Got the MOS. Save it at the memory addr MOS. */
			grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
			grub_memmove(&current_uberblock,
			    &ubbest->ubp_uberblock, sizeof (uberblock_t));
			grub_memmove(current_bootpath, tmp_bootpath,
			    MAXNAMELEN);
			grub_memmove(current_devid, tmp_devid,
			    grub_strlen(tmp_devid));
			is_zfs_mount = 1;
			return (1);
		}
	}

	/*
	 * Some fs implementations (e.g. tftp) rely on errnum being set and
	 * staying set globally, while others never reset it and would break
	 * on subsequent raw reads.  Restore the saved value here so that a
	 * failed zfs mount attempt does not disturb the previous state.
	 */
	errnum = err;
	return (0);
}

/*
 * zfs_open() locates a file in the rootpool by following the
 * MOS and places the dnode of the file at the memory address DNODE.
 *
 * Return:
 *	1 - success
 *	0 - failure
 */
int
zfs_open(char *filename)
{
	char *stack;
	dnode_phys_t *mdn;

	file_buf = NULL;
	stackbase = ZFS_SCRATCH;
	stack = stackbase;

	mdn = (dnode_phys_t *)stack;
	stack += sizeof (dnode_phys_t);

	dnode_mdn = NULL;
	dnode_buf = (dnode_phys_t *)stack;
	stack += 1<<DNODE_BLOCK_SHIFT;

	/*
	 * menu.lst is placed at the root pool filesystem level,
	 * so do not descend into 'current_bootfs'.
	 */
	if (is_top_dataset_file(filename)) {
		if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
			return (0);

		current_bootfs_obj = 0;
	} else {
		if (current_bootfs[0] == '\0') {
			/* Get the default root filesystem object number */
			if (errnum = get_default_bootfsobj(MOS,
			    &current_bootfs_obj, stack))
				return (0);

			if (errnum = get_objset_mdn(MOS, NULL,
			    &current_bootfs_obj, mdn, stack))
				return (0);
		} else {
			if (errnum = get_objset_mdn(MOS, current_bootfs,
			    &current_bootfs_obj, mdn, stack)) {
				grub_memset(current_bootfs, 0, MAXNAMELEN);
				return (0);
			}
		}
	}

	if (dnode_get_path(mdn, filename, DNODE, stack)) {
		errnum = ERR_FILE_NOT_FOUND;
		return (0);
	}

	/* get the file size and set the file position to 0 */
	filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
	filepos = 0;

	dnode_buf = NULL;
	return (1);
}

/*
 * zfs_read() reads in the data blocks pointed to by the DNODE.
 *
 * Return:
 *	len - the length successfully read in to the buffer
 *	0   - failure
 */
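/*
 * One data block of the open file is cached in file_buf (carved off the
 * scratch area on first use), covering the byte range
 * [file_start, file_end).  Reads that stay inside the cached block are
 * satisfied from memory without another dmu_read().
 */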
int
zfs_read(char *buf, int len)
{
	char *stack;
	char *tmpbuf;
	int blksz, length, movesize;

	if (file_buf == NULL) {
		file_buf = stackbase;
		stackbase += SPA_MAXBLOCKSIZE;
		file_start = file_end = 0;
	}
	stack = stackbase;

	/*
	 * If offset is in memory, move it into the buffer provided and return.
	 */
	if (filepos >= file_start && filepos+len <= file_end) {
		grub_memmove(buf, file_buf + filepos - file_start, len);
		filepos += len;
		return (len);
	}

	blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;

	/*
	 * The file is too big to fit into the space available; read it in
	 * chunks.  This could be optimized to read in as large a chunk as
	 * there is space available, but for now, this only reads in one
	 * data block at a time.
	 */
	length = len;
	while (length) {
		/*
		 * Find requested blkid and the offset within that block.
		 */
		uint64_t blkid = filepos / blksz;

		if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
			return (0);

		file_start = blkid * blksz;
		file_end = file_start + blksz;

		movesize = MIN(length, file_end - filepos);

		grub_memmove(buf, file_buf + filepos - file_start,
		    movesize);
		buf += movesize;
		length -= movesize;
		filepos += movesize;
	}

	return (len);
}

/*
 * No-Op
 */
int
zfs_embed(int *start_sector, int needed_sectors)
{
	return (1);
}

#endif /* FSYS_ZFS */