xref: /illumos-gate/usr/src/uts/common/fs/zfs/zvol.c (revision 42b14111)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * Portions Copyright 2010 Robert Milkowski
25  *
26  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
27  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
28  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29  * Copyright (c) 2014 Integros [integros.com]
30  */
31 
32 /*
33  * ZFS volume emulation driver.
34  *
35  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
36  * Volumes are accessed through the symbolic links named:
37  *
38  * /dev/zvol/dsk/<pool_name>/<dataset_name>
39  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
40  *
41  * These links are created by the /dev filesystem (sdev_zvolops.c).
42  * Volumes are persistent through reboot.  No user command needs to be
43  * run before opening and using a device.
44  */
45 
46 #include <sys/types.h>
47 #include <sys/param.h>
48 #include <sys/errno.h>
49 #include <sys/uio.h>
50 #include <sys/buf.h>
51 #include <sys/modctl.h>
52 #include <sys/open.h>
53 #include <sys/kmem.h>
54 #include <sys/conf.h>
55 #include <sys/cmn_err.h>
56 #include <sys/stat.h>
57 #include <sys/zap.h>
58 #include <sys/spa.h>
59 #include <sys/spa_impl.h>
60 #include <sys/zio.h>
61 #include <sys/dmu_traverse.h>
62 #include <sys/dnode.h>
63 #include <sys/dsl_dataset.h>
64 #include <sys/dsl_prop.h>
65 #include <sys/dkio.h>
66 #include <sys/efi_partition.h>
67 #include <sys/byteorder.h>
68 #include <sys/pathname.h>
69 #include <sys/ddi.h>
70 #include <sys/sunddi.h>
71 #include <sys/crc32.h>
72 #include <sys/dirent.h>
73 #include <sys/policy.h>
74 #include <sys/fs/zfs.h>
75 #include <sys/zfs_ioctl.h>
76 #include <sys/mkdev.h>
77 #include <sys/zil.h>
78 #include <sys/refcount.h>
79 #include <sys/zfs_znode.h>
80 #include <sys/zfs_rlock.h>
81 #include <sys/vdev_disk.h>
82 #include <sys/vdev_impl.h>
83 #include <sys/vdev_raidz.h>
84 #include <sys/zvol.h>
85 #include <sys/dumphdr.h>
86 #include <sys/zil_impl.h>
87 #include <sys/dbuf.h>
88 #include <sys/dmu_tx.h>
89 #include <sys/zfeature.h>
90 #include <sys/zio_checksum.h>
92 
93 #include "zfs_namecheck.h"
94 
95 void *zfsdev_state;
96 static char *zvol_tag = "zvol_tag";
97 
98 #define	ZVOL_DUMPSIZE		"dumpsize"
99 
100 /*
101  * This lock protects the zfsdev_state structure from being modified
102  * while it's being used, e.g. an open that comes in before a create
103  * finishes.  It also protects temporary opens of the dataset so that,
104  * e.g., an open doesn't get a spurious EBUSY.
105  */
106 kmutex_t zfsdev_state_lock;
107 static uint32_t zvol_minors;
108 
109 typedef struct zvol_extent {
110 	list_node_t	ze_node;
111 	dva_t		ze_dva;		/* dva associated with this extent */
112 	uint64_t	ze_nblks;	/* number of blocks in extent */
113 } zvol_extent_t;
114 
115 /*
116  * The in-core state of each volume.
117  */
118 typedef struct zvol_state {
119 	char		zv_name[MAXPATHLEN]; /* pool/dd name */
120 	uint64_t	zv_volsize;	/* amount of space we advertise */
121 	uint64_t	zv_volblocksize; /* volume block size */
122 	minor_t		zv_minor;	/* minor number */
123 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
124 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
125 	objset_t	*zv_objset;	/* objset handle */
126 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
127 	uint32_t	zv_total_opens;	/* total open count */
128 	zilog_t		*zv_zilog;	/* ZIL handle */
129 	list_t		zv_extents;	/* List of extents for dump */
130 	znode_t		zv_znode;	/* for range locking */
131 	dmu_buf_t	*zv_dbuf;	/* bonus handle */
132 } zvol_state_t;
133 
134 /*
135  * zvol specific flags
136  */
137 #define	ZVOL_RDONLY	0x1
138 #define	ZVOL_DUMPIFIED	0x2
139 #define	ZVOL_EXCL	0x4
140 #define	ZVOL_WCE	0x8
141 
142 /*
143  * zvol maximum transfer in one DMU tx.
144  */
145 int zvol_maxphys = DMU_MAX_ACCESS/2;
146 
147 /*
148  * Toggle unmap functionality.
149  */
150 boolean_t zvol_unmap_enabled = B_TRUE;
151 
152 /*
153  * If true, unmaps requested as synchronous are executed synchronously,
154  * otherwise all unmaps are asynchronous.
155  */
156 boolean_t zvol_unmap_sync_enabled = B_FALSE;
157 
158 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
159     nvlist_t *, nvlist_t *);
160 static int zvol_remove_zv(zvol_state_t *);
161 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf,
162     struct lwb *lwb, zio_t *zio);
163 static int zvol_dumpify(zvol_state_t *zv);
164 static int zvol_dump_fini(zvol_state_t *zv);
165 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
166 
167 static void
168 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
169 {
170 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
171 
172 	zv->zv_volsize = volsize;
173 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
174 	    "Size", volsize) == DDI_SUCCESS);
175 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
176 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
177 
178 	/* Notify specfs to invalidate the cached size */
179 	spec_size_invalidate(dev, VBLK);
180 	spec_size_invalidate(dev, VCHR);
181 }
182 
183 int
184 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
185 {
186 	if (volsize == 0)
187 		return (SET_ERROR(EINVAL));
188 
189 	if (volsize % blocksize != 0)
190 		return (SET_ERROR(EINVAL));
191 
192 #ifdef _ILP32
193 	if (volsize - 1 > SPEC_MAXOFFSET_T)
194 		return (SET_ERROR(EOVERFLOW));
195 #endif
196 	return (0);
197 }
198 
199 int
200 zvol_check_volblocksize(uint64_t volblocksize)
201 {
202 	if (volblocksize < SPA_MINBLOCKSIZE ||
203 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
204 	    !ISP2(volblocksize))
205 		return (SET_ERROR(EDOM));
206 
207 	return (0);
208 }
209 
210 int
211 zvol_get_stats(objset_t *os, nvlist_t *nv)
212 {
213 	int error;
214 	dmu_object_info_t doi;
215 	uint64_t val;
216 
217 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
218 	if (error)
219 		return (error);
220 
221 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
222 
223 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
224 
225 	if (error == 0) {
226 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
227 		    doi.doi_data_block_size);
228 	}
229 
230 	return (error);
231 }
232 
233 static zvol_state_t *
234 zvol_minor_lookup(const char *name)
235 {
236 	minor_t minor;
237 	zvol_state_t *zv;
238 
239 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
240 
241 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
242 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
243 		if (zv == NULL)
244 			continue;
245 		if (strcmp(zv->zv_name, name) == 0)
246 			return (zv);
247 	}
248 
249 	return (NULL);
250 }
251 
252 /* extent mapping arg */
253 struct maparg {
254 	zvol_state_t	*ma_zv;
255 	uint64_t	ma_blks;
256 };
257 
258 /*ARGSUSED*/
259 static int
260 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
261     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
262 {
263 	struct maparg *ma = arg;
264 	zvol_extent_t *ze;
265 	int bs = ma->ma_zv->zv_volblocksize;
266 
267 	if (bp == NULL || BP_IS_HOLE(bp) ||
268 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
269 		return (0);
270 
271 	VERIFY(!BP_IS_EMBEDDED(bp));
272 
273 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
274 	ma->ma_blks++;
275 
276 	/* Abort immediately if we have encountered gang blocks */
277 	if (BP_IS_GANG(bp))
278 		return (SET_ERROR(EFRAGS));
279 
280 	/*
281 	 * See if the block is at the end of the previous extent.
282 	 */
283 	ze = list_tail(&ma->ma_zv->zv_extents);
284 	if (ze &&
285 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
286 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
287 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
288 		ze->ze_nblks++;
289 		return (0);
290 	}
291 
292 	dprintf_bp(bp, "%s", "next blkptr:");
293 
294 	/* start a new extent */
295 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
296 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
297 	ze->ze_nblks = 1;
298 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
299 	return (0);
300 }
301 
302 static void
303 zvol_free_extents(zvol_state_t *zv)
304 {
305 	zvol_extent_t *ze;
306 
307 	while ((ze = list_head(&zv->zv_extents)) != NULL) {
308 		list_remove(&zv->zv_extents, ze);
309 		kmem_free(ze, sizeof (zvol_extent_t));
310 	}
311 }
312 
313 static int
314 zvol_get_lbas(zvol_state_t *zv)
315 {
316 	objset_t *os = zv->zv_objset;
317 	struct maparg	ma;
318 	int		err;
319 
320 	ma.ma_zv = zv;
321 	ma.ma_blks = 0;
322 	zvol_free_extents(zv);
323 
324 	/* commit any in-flight changes before traversing the dataset */
325 	txg_wait_synced(dmu_objset_pool(os), 0);
326 	err = traverse_dataset(dmu_objset_ds(os), 0,
327 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
328 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
329 		zvol_free_extents(zv);
330 		return (err ? err : EIO);
331 	}
332 
333 	return (0);
334 }
335 
336 /* ARGSUSED */
337 void
338 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
339 {
340 	zfs_creat_t *zct = arg;
341 	nvlist_t *nvprops = zct->zct_props;
342 	int error;
343 	uint64_t volblocksize, volsize;
344 
345 	VERIFY(nvlist_lookup_uint64(nvprops,
346 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
347 	if (nvlist_lookup_uint64(nvprops,
348 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
349 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
350 
351 	/*
352 	 * These properties must be removed from the list so the generic
353 	 * property setting step won't apply to them.
354 	 */
355 	VERIFY(nvlist_remove_all(nvprops,
356 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
357 	(void) nvlist_remove_all(nvprops,
358 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
359 
360 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
361 	    DMU_OT_NONE, 0, tx);
362 	ASSERT(error == 0);
363 
364 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
365 	    DMU_OT_NONE, 0, tx);
366 	ASSERT(error == 0);
367 
368 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
369 	ASSERT(error == 0);
370 }
371 
372 /*
373  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
374  * implement DKIOCFREE/free-long-range.
375  */
376 static int
377 zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
378 {
379 	uint64_t offset, length;
380 
381 	if (byteswap)
382 		byteswap_uint64_array(lr, sizeof (*lr));
383 
384 	offset = lr->lr_offset;
385 	length = lr->lr_length;
386 
387 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
388 }
389 
390 /*
391  * Replay a TX_WRITE ZIL transaction that didn't get committed
392  * after a system failure
393  */
394 static int
395 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
396 {
397 	objset_t *os = zv->zv_objset;
398 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
399 	uint64_t offset, length;
400 	dmu_tx_t *tx;
401 	int error;
402 
403 	if (byteswap)
404 		byteswap_uint64_array(lr, sizeof (*lr));
405 
406 	offset = lr->lr_offset;
407 	length = lr->lr_length;
408 
409 	/* If it's a dmu_sync() block, write the whole block */
410 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
411 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
412 		if (length < blocksize) {
413 			offset -= offset % blocksize;
414 			length = blocksize;
415 		}
416 	}
417 
418 	tx = dmu_tx_create(os);
419 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
420 	error = dmu_tx_assign(tx, TXG_WAIT);
421 	if (error) {
422 		dmu_tx_abort(tx);
423 	} else {
424 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
425 		dmu_tx_commit(tx);
426 	}
427 
428 	return (error);
429 }
430 
431 /* ARGSUSED */
432 static int
433 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
434 {
435 	return (SET_ERROR(ENOTSUP));
436 }
437 
438 /*
439  * Callback vectors for replaying records.
440  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
441  */
442 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
443 	zvol_replay_err,	/* 0 no such transaction type */
444 	zvol_replay_err,	/* TX_CREATE */
445 	zvol_replay_err,	/* TX_MKDIR */
446 	zvol_replay_err,	/* TX_MKXATTR */
447 	zvol_replay_err,	/* TX_SYMLINK */
448 	zvol_replay_err,	/* TX_REMOVE */
449 	zvol_replay_err,	/* TX_RMDIR */
450 	zvol_replay_err,	/* TX_LINK */
451 	zvol_replay_err,	/* TX_RENAME */
452 	zvol_replay_write,	/* TX_WRITE */
453 	zvol_replay_truncate,	/* TX_TRUNCATE */
454 	zvol_replay_err,	/* TX_SETATTR */
455 	zvol_replay_err,	/* TX_ACL */
456 	zvol_replay_err,	/* TX_CREATE_ACL */
457 	zvol_replay_err,	/* TX_CREATE_ATTR */
458 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
459 	zvol_replay_err,	/* TX_MKDIR_ACL */
460 	zvol_replay_err,	/* TX_MKDIR_ATTR */
461 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
462 	zvol_replay_err,	/* TX_WRITE2 */
463 };
464 
465 int
466 zvol_name2minor(const char *name, minor_t *minor)
467 {
468 	zvol_state_t *zv;
469 
470 	mutex_enter(&zfsdev_state_lock);
471 	zv = zvol_minor_lookup(name);
472 	if (minor && zv)
473 		*minor = zv->zv_minor;
474 	mutex_exit(&zfsdev_state_lock);
475 	return (zv ? 0 : -1);
476 }
477 
478 /*
479  * Create a minor node (plus a whole lot more) for the specified volume.
480  */
481 int
482 zvol_create_minor(const char *name)
483 {
484 	zfs_soft_state_t *zs;
485 	zvol_state_t *zv;
486 	objset_t *os;
487 	dmu_object_info_t doi;
488 	minor_t minor = 0;
489 	char chrbuf[30], blkbuf[30];
490 	int error;
491 
492 	mutex_enter(&zfsdev_state_lock);
493 
494 	if (zvol_minor_lookup(name) != NULL) {
495 		mutex_exit(&zfsdev_state_lock);
496 		return (SET_ERROR(EEXIST));
497 	}
498 
499 	/* lie and say we're read-only */
500 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
501 
502 	if (error) {
503 		mutex_exit(&zfsdev_state_lock);
504 		return (error);
505 	}
506 
507 	if ((minor = zfsdev_minor_alloc()) == 0) {
508 		dmu_objset_disown(os, FTAG);
509 		mutex_exit(&zfsdev_state_lock);
510 		return (SET_ERROR(ENXIO));
511 	}
512 
513 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
514 		dmu_objset_disown(os, FTAG);
515 		mutex_exit(&zfsdev_state_lock);
516 		return (SET_ERROR(EAGAIN));
517 	}
518 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
519 	    (char *)name);
520 
521 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
522 
523 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
524 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
525 		ddi_soft_state_free(zfsdev_state, minor);
526 		dmu_objset_disown(os, FTAG);
527 		mutex_exit(&zfsdev_state_lock);
528 		return (SET_ERROR(EAGAIN));
529 	}
530 
531 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
532 
533 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
534 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
535 		ddi_remove_minor_node(zfs_dip, chrbuf);
536 		ddi_soft_state_free(zfsdev_state, minor);
537 		dmu_objset_disown(os, FTAG);
538 		mutex_exit(&zfsdev_state_lock);
539 		return (SET_ERROR(EAGAIN));
540 	}
541 
542 	zs = ddi_get_soft_state(zfsdev_state, minor);
543 	zs->zss_type = ZSST_ZVOL;
544 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
545 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
546 	zv->zv_min_bs = DEV_BSHIFT;
547 	zv->zv_minor = minor;
548 	zv->zv_objset = os;
549 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
550 		zv->zv_flags |= ZVOL_RDONLY;
551 	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
552 	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
553 	    sizeof (rl_t), offsetof(rl_t, r_node));
554 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
555 	    offsetof(zvol_extent_t, ze_node));
556 	/* get and cache the blocksize */
557 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
558 	ASSERT(error == 0);
559 	zv->zv_volblocksize = doi.doi_data_block_size;
560 
561 	if (spa_writeable(dmu_objset_spa(os))) {
562 		if (zil_replay_disable)
563 			zil_destroy(dmu_objset_zil(os), B_FALSE);
564 		else
565 			zil_replay(os, zv, zvol_replay_vector);
566 	}
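	/*
	 * Drop the hold taken above; the objset is re-owned by
	 * zvol_first_open() when the device is actually opened.
	 */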
567 	dmu_objset_disown(os, FTAG);
568 	zv->zv_objset = NULL;
569 
570 	zvol_minors++;
571 
572 	mutex_exit(&zfsdev_state_lock);
573 
574 	return (0);
575 }
576 
577 /*
578  * Remove minor node for the specified volume.
579  */
580 static int
581 zvol_remove_zv(zvol_state_t *zv)
582 {
583 	char nmbuf[20];
584 	minor_t minor = zv->zv_minor;
585 
586 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
587 	if (zv->zv_total_opens != 0)
588 		return (SET_ERROR(EBUSY));
589 
590 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
591 	ddi_remove_minor_node(zfs_dip, nmbuf);
592 
593 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
594 	ddi_remove_minor_node(zfs_dip, nmbuf);
595 
596 	avl_destroy(&zv->zv_znode.z_range_avl);
597 	mutex_destroy(&zv->zv_znode.z_range_lock);
598 
599 	kmem_free(zv, sizeof (zvol_state_t));
600 
601 	ddi_soft_state_free(zfsdev_state, minor);
602 
603 	zvol_minors--;
604 	return (0);
605 }
606 
607 int
608 zvol_remove_minor(const char *name)
609 {
610 	zvol_state_t *zv;
611 	int rc;
612 
613 	mutex_enter(&zfsdev_state_lock);
614 	if ((zv = zvol_minor_lookup(name)) == NULL) {
615 		mutex_exit(&zfsdev_state_lock);
616 		return (SET_ERROR(ENXIO));
617 	}
618 	rc = zvol_remove_zv(zv);
619 	mutex_exit(&zfsdev_state_lock);
620 	return (rc);
621 }
622 
623 int
624 zvol_first_open(zvol_state_t *zv)
625 {
626 	objset_t *os;
627 	uint64_t volsize;
628 	int error;
629 	uint64_t readonly;
630 
631 	/* lie and say we're read-only */
632 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
633 	    zvol_tag, &os);
634 	if (error)
635 		return (error);
636 
637 	zv->zv_objset = os;
638 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
639 	if (error) {
641 		dmu_objset_disown(os, zvol_tag);
642 		return (error);
643 	}
644 
645 	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
646 	if (error) {
647 		dmu_objset_disown(os, zvol_tag);
648 		return (error);
649 	}
650 
651 	zvol_size_changed(zv, volsize);
652 	zv->zv_zilog = zil_open(os, zvol_get_data);
653 
654 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
655 	    NULL) == 0);
656 	if (readonly || dmu_objset_is_snapshot(os) ||
657 	    !spa_writeable(dmu_objset_spa(os)))
658 		zv->zv_flags |= ZVOL_RDONLY;
659 	else
660 		zv->zv_flags &= ~ZVOL_RDONLY;
661 	return (error);
662 }
663 
664 void
665 zvol_last_close(zvol_state_t *zv)
666 {
667 	zil_close(zv->zv_zilog);
668 	zv->zv_zilog = NULL;
669 
670 	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
671 	zv->zv_dbuf = NULL;
672 
673 	/*
674 	 * Evict cached data
675 	 */
676 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
677 	    !(zv->zv_flags & ZVOL_RDONLY))
678 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
679 	dmu_objset_evict_dbufs(zv->zv_objset);
680 
681 	dmu_objset_disown(zv->zv_objset, zvol_tag);
682 	zv->zv_objset = NULL;
683 }
684 
685 int
686 zvol_prealloc(zvol_state_t *zv)
687 {
688 	objset_t *os = zv->zv_objset;
689 	dmu_tx_t *tx;
690 	uint64_t refd, avail, usedobjs, availobjs;
691 	uint64_t resid = zv->zv_volsize;
692 	uint64_t off = 0;
693 
694 	/* Check the space usage before attempting to allocate the space */
695 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
696 	if (avail < zv->zv_volsize)
697 		return (SET_ERROR(ENOSPC));
698 
699 	/* Free old extents if they exist */
700 	zvol_free_extents(zv);
701 
702 	while (resid != 0) {
703 		int error;
704 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
705 
706 		tx = dmu_tx_create(os);
707 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
708 		error = dmu_tx_assign(tx, TXG_WAIT);
709 		if (error) {
710 			dmu_tx_abort(tx);
711 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
712 			return (error);
713 		}
714 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
715 		dmu_tx_commit(tx);
716 		off += bytes;
717 		resid -= bytes;
718 	}
719 	txg_wait_synced(dmu_objset_pool(os), 0);
720 
721 	return (0);
722 }
723 
724 static int
725 zvol_update_volsize(objset_t *os, uint64_t volsize)
726 {
727 	dmu_tx_t *tx;
728 	int error;
729 
730 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
731 
732 	tx = dmu_tx_create(os);
733 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
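	/*
	 * Mark the tx as net-freeing space so that it can be assigned
	 * even when the pool is almost full; shrinking a volume
	 * ultimately frees space.
	 */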
734 	dmu_tx_mark_netfree(tx);
735 	error = dmu_tx_assign(tx, TXG_WAIT);
736 	if (error) {
737 		dmu_tx_abort(tx);
738 		return (error);
739 	}
740 
741 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
742 	    &volsize, tx);
743 	dmu_tx_commit(tx);
744 
745 	if (error == 0)
746 		error = dmu_free_long_range(os,
747 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
748 	return (error);
749 }
750 
751 void
752 zvol_remove_minors(const char *name)
753 {
754 	zvol_state_t *zv;
755 	char *namebuf;
756 	minor_t minor;
757 
758 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
759 	(void) strncpy(namebuf, name, strlen(name));
760 	(void) strcat(namebuf, "/");
761 	mutex_enter(&zfsdev_state_lock);
762 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
763 
764 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
765 		if (zv == NULL)
766 			continue;
767 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
768 			(void) zvol_remove_zv(zv);
769 	}
770 	kmem_free(namebuf, strlen(name) + 2);
771 
772 	mutex_exit(&zfsdev_state_lock);
773 }
774 
775 static int
776 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
777 {
778 	uint64_t old_volsize = 0ULL;
779 	int error = 0;
780 
781 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
782 
783 	/*
784 	 * Reinitialize the dump area to the new size.  If we fail to
785 	 * resize the dump area, restore it to its original size.  We
786 	 * must set the new volsize prior to calling dumpvp_resize() so
787 	 * that the device's new size(9P) is visible to the dump
788 	 * subsystem.
789 	 */
790 	old_volsize = zv->zv_volsize;
791 	zvol_size_changed(zv, volsize);
792 
793 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
794 		if ((error = zvol_dumpify(zv)) != 0 ||
795 		    (error = dumpvp_resize()) != 0) {
796 			int dumpify_error;
797 
798 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
799 			zvol_size_changed(zv, old_volsize);
800 			dumpify_error = zvol_dumpify(zv);
801 			error = dumpify_error ? dumpify_error : error;
802 		}
803 	}
804 
805 	/*
806 	 * Generate a LUN expansion event.
807 	 */
808 	if (error == 0) {
809 		sysevent_id_t eid;
810 		nvlist_t *attr;
811 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
812 
813 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
814 		    zv->zv_minor);
815 
816 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
817 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
818 
819 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
820 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
821 
822 		nvlist_free(attr);
823 		kmem_free(physpath, MAXPATHLEN);
824 	}
825 	return (error);
826 }
827 
828 int
829 zvol_set_volsize(const char *name, uint64_t volsize)
830 {
831 	zvol_state_t *zv = NULL;
832 	objset_t *os;
833 	int error;
834 	dmu_object_info_t doi;
835 	uint64_t readonly;
836 	boolean_t owned = B_FALSE;
837 
838 	error = dsl_prop_get_integer(name,
839 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
840 	if (error != 0)
841 		return (error);
842 	if (readonly)
843 		return (SET_ERROR(EROFS));
844 
845 	mutex_enter(&zfsdev_state_lock);
846 	zv = zvol_minor_lookup(name);
847 
848 	if (zv == NULL || zv->zv_objset == NULL) {
849 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
850 		    FTAG, &os)) != 0) {
851 			mutex_exit(&zfsdev_state_lock);
852 			return (error);
853 		}
854 		owned = B_TRUE;
855 		if (zv != NULL)
856 			zv->zv_objset = os;
857 	} else {
858 		os = zv->zv_objset;
859 	}
860 
861 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
862 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
863 		goto out;
864 
865 	error = zvol_update_volsize(os, volsize);
866 
867 	if (error == 0 && zv != NULL)
868 		error = zvol_update_live_volsize(zv, volsize);
869 out:
870 	if (owned) {
871 		dmu_objset_disown(os, FTAG);
872 		if (zv != NULL)
873 			zv->zv_objset = NULL;
874 	}
875 	mutex_exit(&zfsdev_state_lock);
876 	return (error);
877 }
878 
879 /*ARGSUSED*/
880 int
881 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
882 {
883 	zvol_state_t *zv;
884 	int err = 0;
885 
886 	mutex_enter(&zfsdev_state_lock);
887 
888 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
889 	if (zv == NULL) {
890 		mutex_exit(&zfsdev_state_lock);
891 		return (SET_ERROR(ENXIO));
892 	}
893 
894 	if (zv->zv_total_opens == 0)
895 		err = zvol_first_open(zv);
896 	if (err) {
897 		mutex_exit(&zfsdev_state_lock);
898 		return (err);
899 	}
900 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
901 		err = SET_ERROR(EROFS);
902 		goto out;
903 	}
904 	if (zv->zv_flags & ZVOL_EXCL) {
905 		err = SET_ERROR(EBUSY);
906 		goto out;
907 	}
908 	if (flag & FEXCL) {
909 		if (zv->zv_total_opens != 0) {
910 			err = SET_ERROR(EBUSY);
911 			goto out;
912 		}
913 		zv->zv_flags |= ZVOL_EXCL;
914 	}
915 
916 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
917 		zv->zv_open_count[otyp]++;
918 		zv->zv_total_opens++;
919 	}
920 	mutex_exit(&zfsdev_state_lock);
921 
922 	return (err);
923 out:
924 	if (zv->zv_total_opens == 0)
925 		zvol_last_close(zv);
926 	mutex_exit(&zfsdev_state_lock);
927 	return (err);
928 }
929 
930 /*ARGSUSED*/
931 int
932 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
933 {
934 	minor_t minor = getminor(dev);
935 	zvol_state_t *zv;
936 	int error = 0;
937 
938 	mutex_enter(&zfsdev_state_lock);
939 
940 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
941 	if (zv == NULL) {
942 		mutex_exit(&zfsdev_state_lock);
943 		return (SET_ERROR(ENXIO));
944 	}
945 
946 	if (zv->zv_flags & ZVOL_EXCL) {
947 		ASSERT(zv->zv_total_opens == 1);
948 		zv->zv_flags &= ~ZVOL_EXCL;
949 	}
950 
951 	/*
952 	 * If the open count is zero, this is a spurious close.
953 	 * That indicates a bug in the kernel / DDI framework.
954 	 */
955 	ASSERT(zv->zv_open_count[otyp] != 0);
956 	ASSERT(zv->zv_total_opens != 0);
957 
958 	/*
959 	 * You may get multiple opens, but only one close.
960 	 */
961 	zv->zv_open_count[otyp]--;
962 	zv->zv_total_opens--;
963 
964 	if (zv->zv_total_opens == 0)
965 		zvol_last_close(zv);
966 
967 	mutex_exit(&zfsdev_state_lock);
968 	return (error);
969 }
970 
971 static void
972 zvol_get_done(zgd_t *zgd, int error)
973 {
974 	if (zgd->zgd_db)
975 		dmu_buf_rele(zgd->zgd_db, zgd);
976 
977 	zfs_range_unlock(zgd->zgd_rl);
978 
979 	if (error == 0 && zgd->zgd_bp)
980 		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
981 
982 	kmem_free(zgd, sizeof (zgd_t));
983 }
984 
985 /*
986  * Get data to generate a TX_WRITE intent log record.
987  */
988 static int
989 zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
990 {
991 	zvol_state_t *zv = arg;
992 	objset_t *os = zv->zv_objset;
993 	uint64_t object = ZVOL_OBJ;
994 	uint64_t offset = lr->lr_offset;
995 	uint64_t size = lr->lr_length;	/* length of user data */
996 	dmu_buf_t *db;
997 	zgd_t *zgd;
998 	int error;
999 
1000 	ASSERT3P(lwb, !=, NULL);
1001 	ASSERT3P(zio, !=, NULL);
1002 	ASSERT3U(size, !=, 0);
1003 
1004 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1005 	zgd->zgd_lwb = lwb;
1006 
1007 	/*
1008 	 * Write records come in two flavors: immediate and indirect.
1009 	 * For small writes it's cheaper to store the data with the
1010 	 * log record (immediate); for large writes it's cheaper to
1011 	 * sync the data and get a pointer to it (indirect) so that
1012 	 * we don't have to write the data twice.
1013 	 */
1014 	if (buf != NULL) { /* immediate write */
1015 		zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
1016 		    RL_READER);
1017 		error = dmu_read(os, object, offset, size, buf,
1018 		    DMU_READ_NO_PREFETCH);
1019 	} else { /* indirect write */
1020 		/*
1021 		 * We have to lock the whole block so that no one can change
1022 		 * the data while it is written out and its checksum is being
1023 		 * calculated. Unlike zfs_get_data, we need not re-check the
1024 		 * blocksize after we take the lock because it cannot change.
1025 		 */
1026 		size = zv->zv_volblocksize;
1027 		offset = P2ALIGN(offset, size);
1028 		zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
1029 		    RL_READER);
1030 		error = dmu_buf_hold(os, object, offset, zgd, &db,
1031 		    DMU_READ_NO_PREFETCH);
1032 		if (error == 0) {
1033 			blkptr_t *bp = &lr->lr_blkptr;
1034 
1035 			zgd->zgd_db = db;
1036 			zgd->zgd_bp = bp;
1037 
1038 			ASSERT(db->db_offset == offset);
1039 			ASSERT(db->db_size == size);
1040 
1041 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1042 			    zvol_get_done, zgd);
1043 
1044 			if (error == 0)
1045 				return (0);
1046 		}
1047 	}
1048 
1049 	zvol_get_done(zgd, error);
1050 
1051 	return (error);
1052 }
1053 
1054 /*
1055  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1056  *
1057  * We store data in the log buffers if it's small enough.
1058  * Otherwise we will later flush the data out via dmu_sync().
1059  */
1060 ssize_t zvol_immediate_write_sz = 32768;
1061 
1062 static void
1063 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1064     boolean_t sync)
1065 {
1066 	uint32_t blocksize = zv->zv_volblocksize;
1067 	zilog_t *zilog = zv->zv_zilog;
1068 	itx_wr_state_t write_state;
1069 
1070 	if (zil_replaying(zilog, tx))
1071 		return;
1072 
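	/*
	 * Pick the initial itx write state.  With logbias=throughput, or
	 * for large writes on pools without separate log devices, use
	 * WR_INDIRECT (the data is synced once via dmu_sync() and the log
	 * record points at it).  Synchronous writes copy the data into
	 * the itx now (WR_COPIED); asynchronous writes defer the copy
	 * until commit time (WR_NEED_COPY).
	 */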
1073 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1074 		write_state = WR_INDIRECT;
1075 	else if (!spa_has_slogs(zilog->zl_spa) &&
1076 	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1077 		write_state = WR_INDIRECT;
1078 	else if (sync)
1079 		write_state = WR_COPIED;
1080 	else
1081 		write_state = WR_NEED_COPY;
1082 
1083 	while (resid) {
1084 		itx_t *itx;
1085 		lr_write_t *lr;
1086 		itx_wr_state_t wr_state = write_state;
1087 		ssize_t len = resid;
1088 
1089 		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1090 			wr_state = WR_NEED_COPY;
1091 		else if (wr_state == WR_INDIRECT)
1092 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1093 
1094 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1095 		    (wr_state == WR_COPIED ? len : 0));
1096 		lr = (lr_write_t *)&itx->itx_lr;
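		/*
		 * A WR_COPIED record embeds the data in the itx itself,
		 * so read it in now.  If the read fails, fall back to a
		 * WR_NEED_COPY record, which copies the data at commit
		 * time instead.
		 */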
1097 		if (wr_state == WR_COPIED && dmu_read(zv->zv_objset,
1098 		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1099 			zil_itx_destroy(itx);
1100 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1101 			lr = (lr_write_t *)&itx->itx_lr;
1102 			wr_state = WR_NEED_COPY;
1103 		}
1104 
1105 		itx->itx_wr_state = wr_state;
1106 		lr->lr_foid = ZVOL_OBJ;
1107 		lr->lr_offset = off;
1108 		lr->lr_length = len;
1109 		lr->lr_blkoff = 0;
1110 		BP_ZERO(&lr->lr_blkptr);
1111 
1112 		itx->itx_private = zv;
1113 		itx->itx_sync = sync;
1114 
1115 		zil_itx_assign(zilog, itx, tx);
1116 
1117 		off += len;
1118 		resid -= len;
1119 	}
1120 }
1121 
1122 static int
1123 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1124     uint64_t size, boolean_t doread, boolean_t isdump)
1125 {
1126 	vdev_disk_t *dvd;
1127 	int c;
1128 	int numerrors = 0;
1129 
1130 	if (vd->vdev_ops == &vdev_mirror_ops ||
1131 	    vd->vdev_ops == &vdev_replacing_ops ||
1132 	    vd->vdev_ops == &vdev_spare_ops) {
1133 		for (c = 0; c < vd->vdev_children; c++) {
1134 			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1135 			    addr, offset, origoffset, size, doread, isdump);
1136 			if (err != 0) {
1137 				numerrors++;
1138 			} else if (doread) {
1139 				break;
1140 			}
1141 		}
1142 	}
1143 
1144 	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1145 		return (numerrors < vd->vdev_children ? 0 : EIO);
1146 
1147 	if (doread && !vdev_readable(vd))
1148 		return (SET_ERROR(EIO));
1149 	else if (!doread && !vdev_writeable(vd))
1150 		return (SET_ERROR(EIO));
1151 
1152 	if (vd->vdev_ops == &vdev_raidz_ops) {
1153 		return (vdev_raidz_physio(vd,
1154 		    addr, size, offset, origoffset, doread, isdump));
1155 	}
1156 
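	/*
	 * Leaf vdev offsets are relative to the start of the data area,
	 * which lies past the two front vdev labels and the boot block;
	 * shift the offset accordingly.
	 */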
1157 	offset += VDEV_LABEL_START_SIZE;
1158 
1159 	if (ddi_in_panic() || isdump) {
1160 		ASSERT(!doread);
1161 		if (doread)
1162 			return (SET_ERROR(EIO));
1163 		dvd = vd->vdev_tsd;
1164 		ASSERT3P(dvd, !=, NULL);
1165 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1166 		    lbtodb(size)));
1167 	} else {
1168 		dvd = vd->vdev_tsd;
1169 		ASSERT3P(dvd, !=, NULL);
1170 		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1171 		    offset, doread ? B_READ : B_WRITE));
1172 	}
1173 }
1174 
1175 static int
1176 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1177     boolean_t doread, boolean_t isdump)
1178 {
1179 	vdev_t *vd;
1180 	int error;
1181 	zvol_extent_t *ze;
1182 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1183 
1184 	/* Must be sector aligned, and must not straddle a block boundary. */
1185 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1186 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1187 		return (SET_ERROR(EINVAL));
1188 	}
1189 	ASSERT(size <= zv->zv_volblocksize);
1190 
1191 	/* Locate the extent this belongs to */
1192 	ze = list_head(&zv->zv_extents);
1193 	while (ze != NULL && offset >= ze->ze_nblks * zv->zv_volblocksize) {
1194 		offset -= ze->ze_nblks * zv->zv_volblocksize;
1195 		ze = list_next(&zv->zv_extents, ze);
1196 	}
1197 
1198 	if (ze == NULL)
1199 		return (SET_ERROR(EINVAL));
1200 
1201 	if (!ddi_in_panic())
1202 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1203 
1204 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1205 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1206 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1207 	    size, doread, isdump);
1208 
1209 	if (!ddi_in_panic())
1210 		spa_config_exit(spa, SCL_STATE, FTAG);
1211 
1212 	return (error);
1213 }
1214 
1215 int
1216 zvol_strategy(buf_t *bp)
1217 {
1218 	zfs_soft_state_t *zs = NULL;
1219 	zvol_state_t *zv;
1220 	uint64_t off, volsize;
1221 	size_t resid;
1222 	char *addr;
1223 	objset_t *os;
1224 	rl_t *rl;
1225 	int error = 0;
1226 	boolean_t doread = bp->b_flags & B_READ;
1227 	boolean_t is_dumpified;
1228 	boolean_t sync;
1229 
1230 	if (getminor(bp->b_edev) == 0) {
1231 		error = SET_ERROR(EINVAL);
1232 	} else {
1233 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1234 		if (zs == NULL)
1235 			error = SET_ERROR(ENXIO);
1236 		else if (zs->zss_type != ZSST_ZVOL)
1237 			error = SET_ERROR(EINVAL);
1238 	}
1239 
1240 	if (error) {
1241 		bioerror(bp, error);
1242 		biodone(bp);
1243 		return (0);
1244 	}
1245 
1246 	zv = zs->zss_data;
1247 
1248 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1249 		bioerror(bp, EROFS);
1250 		biodone(bp);
1251 		return (0);
1252 	}
1253 
1254 	off = ldbtob(bp->b_blkno);
1255 	volsize = zv->zv_volsize;
1256 
1257 	os = zv->zv_objset;
1258 	ASSERT(os != NULL);
1259 
1260 	bp_mapin(bp);
1261 	addr = bp->b_un.b_addr;
1262 	resid = bp->b_bcount;
1263 
1264 	if (resid > 0 && (off < 0 || off >= volsize)) {
1265 		bioerror(bp, EIO);
1266 		biodone(bp);
1267 		return (0);
1268 	}
1269 
1270 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1271 	sync = ((!(bp->b_flags & B_ASYNC) &&
1272 	    !(zv->zv_flags & ZVOL_WCE)) ||
1273 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1274 	    !doread && !is_dumpified;
1275 
1276 	/*
1277 	 * There must be no buffer changes when doing a dmu_sync() because
1278 	 * we can't change the data whilst calculating the checksum.
1279 	 */
1280 	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1281 	    doread ? RL_READER : RL_WRITER);
1282 
1283 	while (resid != 0 && off < volsize) {
1284 		size_t size = MIN(resid, zvol_maxphys);
1285 		if (is_dumpified) {
1286 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1287 			error = zvol_dumpio(zv, addr, off, size,
1288 			    doread, B_FALSE);
1289 		} else if (doread) {
1290 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1291 			    DMU_READ_PREFETCH);
1292 		} else {
1293 			dmu_tx_t *tx = dmu_tx_create(os);
1294 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1295 			error = dmu_tx_assign(tx, TXG_WAIT);
1296 			if (error) {
1297 				dmu_tx_abort(tx);
1298 			} else {
1299 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1300 				zvol_log_write(zv, tx, off, size, sync);
1301 				dmu_tx_commit(tx);
1302 			}
1303 		}
1304 		if (error) {
1305 			/* convert checksum errors into IO errors */
1306 			if (error == ECKSUM)
1307 				error = SET_ERROR(EIO);
1308 			break;
1309 		}
1310 		off += size;
1311 		addr += size;
1312 		resid -= size;
1313 	}
1314 	zfs_range_unlock(rl);
1315 
1316 	if ((bp->b_resid = resid) == bp->b_bcount)
1317 		bioerror(bp, off > volsize ? EINVAL : error);
1318 
1319 	if (sync)
1320 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1321 	biodone(bp);
1322 
1323 	return (0);
1324 }
1325 
1326 /*
1327  * Set the buffer count to the zvol maximum transfer.
1328  * Using our own routine instead of the default minphys()
1329  * means that for larger writes we write bigger buffers on X86
1330  * (128K instead of 56K) and flush the disk write cache less often
1331  * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1332  * 56K on X86 and 128K on sparc).
1333  */
1334 void
1335 zvol_minphys(struct buf *bp)
1336 {
1337 	if (bp->b_bcount > zvol_maxphys)
1338 		bp->b_bcount = zvol_maxphys;
1339 }
1340 
1341 int
1342 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1343 {
1344 	minor_t minor = getminor(dev);
1345 	zvol_state_t *zv;
1346 	int error = 0;
1347 	uint64_t size;
1348 	uint64_t boff;
1349 	uint64_t resid;
1350 
1351 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1352 	if (zv == NULL)
1353 		return (SET_ERROR(ENXIO));
1354 
1355 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1356 		return (SET_ERROR(EINVAL));
1357 
1358 	boff = ldbtob(blkno);
1359 	resid = ldbtob(nblocks);
1360 
1361 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1362 
1363 	while (resid) {
1364 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1365 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1366 		if (error)
1367 			break;
1368 		boff += size;
1369 		addr += size;
1370 		resid -= size;
1371 	}
1372 
1373 	return (error);
1374 }
1375 
1376 /*ARGSUSED*/
1377 int
1378 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1379 {
1380 	minor_t minor = getminor(dev);
1381 	zvol_state_t *zv;
1382 	uint64_t volsize;
1383 	rl_t *rl;
1384 	int error = 0;
1385 
1386 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1387 	if (zv == NULL)
1388 		return (SET_ERROR(ENXIO));
1389 
1390 	volsize = zv->zv_volsize;
1391 	if (uio->uio_resid > 0 &&
1392 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1393 		return (SET_ERROR(EIO));
1394 
1395 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1396 		error = physio(zvol_strategy, NULL, dev, B_READ,
1397 		    zvol_minphys, uio);
1398 		return (error);
1399 	}
1400 
1401 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1402 	    RL_READER);
1403 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1404 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1405 
1406 		/* don't read past the end */
1407 		if (bytes > volsize - uio->uio_loffset)
1408 			bytes = volsize - uio->uio_loffset;
1409 
1410 		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1411 		if (error) {
1412 			/* convert checksum errors into IO errors */
1413 			if (error == ECKSUM)
1414 				error = SET_ERROR(EIO);
1415 			break;
1416 		}
1417 	}
1418 	zfs_range_unlock(rl);
1419 	return (error);
1420 }
1421 
1422 /*ARGSUSED*/
1423 int
1424 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1425 {
1426 	minor_t minor = getminor(dev);
1427 	zvol_state_t *zv;
1428 	uint64_t volsize;
1429 	rl_t *rl;
1430 	int error = 0;
1431 	boolean_t sync;
1432 
1433 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1434 	if (zv == NULL)
1435 		return (SET_ERROR(ENXIO));
1436 
1437 	volsize = zv->zv_volsize;
1438 	if (uio->uio_resid > 0 &&
1439 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1440 		return (SET_ERROR(EIO));
1441 
1442 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1443 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1444 		    zvol_minphys, uio);
1445 		return (error);
1446 	}
1447 
1448 	sync = !(zv->zv_flags & ZVOL_WCE) ||
1449 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1450 
1451 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1452 	    RL_WRITER);
1453 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1454 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1455 		uint64_t off = uio->uio_loffset;
1456 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1457 
1458 		if (bytes > volsize - off)	/* don't write past the end */
1459 			bytes = volsize - off;
1460 
1461 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1462 		error = dmu_tx_assign(tx, TXG_WAIT);
1463 		if (error) {
1464 			dmu_tx_abort(tx);
1465 			break;
1466 		}
1467 		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1468 		if (error == 0)
1469 			zvol_log_write(zv, tx, off, bytes, sync);
1470 		dmu_tx_commit(tx);
1471 
1472 		if (error)
1473 			break;
1474 	}
1475 	zfs_range_unlock(rl);
1476 	if (sync)
1477 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1478 	return (error);
1479 }
1480 
1481 int
1482 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1483 {
1484 	struct uuid uuid = EFI_RESERVED;
1485 	efi_gpe_t gpe = { 0 };
1486 	uint32_t crc;
1487 	dk_efi_t efi;
1488 	int length;
1489 	char *ptr;
1490 
1491 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1492 		return (SET_ERROR(EFAULT));
1493 	ptr = (char *)(uintptr_t)efi.dki_data_64;
1494 	length = efi.dki_length;
1495 	/*
1496 	 * Some clients may attempt to request a PMBR for the
1497 	 * zvol.  Currently this interface will return EINVAL to
1498 	 * such requests.  These requests could be supported by
1499 	 * adding a check for lba == 0 and consing up an appropriate
1500 	 * PMBR.
1501 	 */
1502 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1503 		return (SET_ERROR(EINVAL));
1504 
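	/*
	 * Fabricate a minimal EFI label: LBA 1 holds the GPT header,
	 * LBA 2 holds the single partition entry, and one EFI_RESERVED
	 * partition spans LBA 34 through the last usable LBA.
	 */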
1505 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1506 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1507 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1508 
1509 	if (efi.dki_lba == 1) {
1510 		efi_gpt_t gpt = { 0 };
1511 
1512 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1513 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1514 		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1515 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1516 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1517 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1518 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1519 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1520 		gpt.efi_gpt_SizeOfPartitionEntry =
1521 		    LE_32(sizeof (efi_gpe_t));
1522 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1523 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1524 		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1525 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1526 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1527 		    flag))
1528 			return (SET_ERROR(EFAULT));
1529 		ptr += sizeof (gpt);
1530 		length -= sizeof (gpt);
1531 	}
1532 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1533 	    length), flag))
1534 		return (SET_ERROR(EFAULT));
1535 	return (0);
1536 }
1537 
1538 /*
1539  * BEGIN entry points to allow external callers access to the volume.
1540  */
1541 /*
1542  * Return the volume parameters needed for access from an external caller.
1543  * These values are invariant as long as the volume is held open.
1544  */
1545 int
1546 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1547     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1548     void **rl_hdl, void **bonus_hdl)
1549 {
1550 	zvol_state_t *zv;
1551 
1552 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1553 	if (zv == NULL)
1554 		return (SET_ERROR(ENXIO));
1555 	if (zv->zv_flags & ZVOL_DUMPIFIED)
1556 		return (SET_ERROR(ENXIO));
1557 
1558 	ASSERT(blksize && max_xfer_len && minor_hdl &&
1559 	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1560 
1561 	*blksize = zv->zv_volblocksize;
1562 	*max_xfer_len = (uint64_t)zvol_maxphys;
1563 	*minor_hdl = zv;
1564 	*objset_hdl = zv->zv_objset;
1565 	*zil_hdl = zv->zv_zilog;
1566 	*rl_hdl = &zv->zv_znode;
1567 	*bonus_hdl = zv->zv_dbuf;
1568 	return (0);
1569 }
1570 
1571 /*
1572  * Return the current volume size to an external caller.
1573  * The size can change while the volume is open.
1574  */
1575 uint64_t
1576 zvol_get_volume_size(void *minor_hdl)
1577 {
1578 	zvol_state_t *zv = minor_hdl;
1579 
1580 	return (zv->zv_volsize);
1581 }
1582 
1583 /*
1584  * Return the current WCE setting to an external caller.
1585  * The WCE setting can change while the volume is open.
1586  */
1587 int
1588 zvol_get_volume_wce(void *minor_hdl)
1589 {
1590 	zvol_state_t *zv = minor_hdl;
1591 
1592 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1593 }
1594 
1595 /*
1596  * Entry point for external callers to zvol_log_write
1597  */
1598 void
1599 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1600     boolean_t sync)
1601 {
1602 	zvol_state_t *zv = minor_hdl;
1603 
1604 	zvol_log_write(zv, tx, off, resid, sync);
1605 }
1606 /*
1607  * END entry points to allow external callers access to the volume.
1608  */
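
/*
 * Illustrative sketch only: a hypothetical external consumer (e.g. an
 * sbd/COMSTAR-style backend; the "blksize"/"mhdl" names below are not
 * part of this file) would use the entry points above roughly as:
 *
 *	uint64_t blksize, max_xfer;
 *	void *mhdl, *oshdl, *zilhdl, *rlhdl, *bonushdl;
 *
 *	if (zvol_get_volume_params(minor, &blksize, &max_xfer, &mhdl,
 *	    &oshdl, &zilhdl, &rlhdl, &bonushdl) == 0) {
 *		uint64_t size = zvol_get_volume_size(mhdl);
 *		int wce = zvol_get_volume_wce(mhdl);
 *		...
 *	}
 *
 * The handles remain valid only while the volume stays open.
 */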
1609 
1610 /*
1611  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1612  */
1613 static void
1614 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1615     boolean_t sync)
1616 {
1617 	itx_t *itx;
1618 	lr_truncate_t *lr;
1619 	zilog_t *zilog = zv->zv_zilog;
1620 
1621 	if (zil_replaying(zilog, tx))
1622 		return;
1623 
1624 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1625 	lr = (lr_truncate_t *)&itx->itx_lr;
1626 	lr->lr_foid = ZVOL_OBJ;
1627 	lr->lr_offset = off;
1628 	lr->lr_length = len;
1629 
1630 	itx->itx_sync = sync;
1631 	zil_itx_assign(zilog, itx, tx);
1632 }
1633 
1634 /*
1635  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1636  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1637  */
1638 /*ARGSUSED*/
1639 int
1640 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1641 {
1642 	zvol_state_t *zv;
1643 	struct dk_callback *dkc;
1644 	int error = 0;
1645 	rl_t *rl;
1646 
1647 	mutex_enter(&zfsdev_state_lock);
1648 
1649 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1650 
1651 	if (zv == NULL) {
1652 		mutex_exit(&zfsdev_state_lock);
1653 		return (SET_ERROR(ENXIO));
1654 	}
1655 	ASSERT(zv->zv_total_opens > 0);
1656 
1657 	switch (cmd) {
1658 
1659 	case DKIOCINFO:
1660 	{
1661 		struct dk_cinfo dki;
1662 
1663 		bzero(&dki, sizeof (dki));
1664 		(void) strcpy(dki.dki_cname, "zvol");
1665 		(void) strcpy(dki.dki_dname, "zvol");
1666 		dki.dki_ctype = DKC_UNKNOWN;
1667 		dki.dki_unit = getminor(dev);
1668 		dki.dki_maxtransfer =
1669 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1670 		mutex_exit(&zfsdev_state_lock);
1671 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1672 			error = SET_ERROR(EFAULT);
1673 		return (error);
1674 	}
1675 
1676 	case DKIOCGMEDIAINFO:
1677 	{
1678 		struct dk_minfo dkm;
1679 
1680 		bzero(&dkm, sizeof (dkm));
1681 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1682 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1683 		dkm.dki_media_type = DK_UNKNOWN;
1684 		mutex_exit(&zfsdev_state_lock);
1685 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1686 			error = SET_ERROR(EFAULT);
1687 		return (error);
1688 	}
1689 
1690 	case DKIOCGMEDIAINFOEXT:
1691 	{
1692 		struct dk_minfo_ext dkmext;
1693 
1694 		bzero(&dkmext, sizeof (dkmext));
1695 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1696 		dkmext.dki_pbsize = zv->zv_volblocksize;
1697 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1698 		dkmext.dki_media_type = DK_UNKNOWN;
1699 		mutex_exit(&zfsdev_state_lock);
1700 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1701 			error = SET_ERROR(EFAULT);
1702 		return (error);
1703 	}
1704 
1705 	case DKIOCGETEFI:
1706 	{
1707 		uint64_t vs = zv->zv_volsize;
1708 		uint8_t bs = zv->zv_min_bs;
1709 
1710 		mutex_exit(&zfsdev_state_lock);
1711 		error = zvol_getefi((void *)arg, flag, vs, bs);
1712 		return (error);
1713 	}
1714 
1715 	case DKIOCFLUSHWRITECACHE:
1716 		dkc = (struct dk_callback *)arg;
1717 		mutex_exit(&zfsdev_state_lock);
1718 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1719 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1720 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1721 			error = 0;
1722 		}
1723 		return (error);
1724 
1725 	case DKIOCGETWCE:
1726 	{
1727 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1728 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1729 		    flag))
1730 			error = SET_ERROR(EFAULT);
1731 		break;
1732 	}
1733 	case DKIOCSETWCE:
1734 	{
1735 		int wce;
1736 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1737 		    flag)) {
1738 			error = SET_ERROR(EFAULT);
1739 			break;
1740 		}
1741 		if (wce) {
1742 			zv->zv_flags |= ZVOL_WCE;
1743 			mutex_exit(&zfsdev_state_lock);
1744 		} else {
1745 			zv->zv_flags &= ~ZVOL_WCE;
1746 			mutex_exit(&zfsdev_state_lock);
1747 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1748 		}
1749 		return (0);
1750 	}
1751 
1752 	case DKIOCGGEOM:
1753 	case DKIOCGVTOC:
1754 		/*
1755 		 * commands using these (like prtvtoc) expect ENOTSUP
1756 		 * since we're emulating an EFI label
1757 		 */
1758 		error = SET_ERROR(ENOTSUP);
1759 		break;
1760 
1761 	case DKIOCDUMPINIT:
1762 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1763 		    RL_WRITER);
1764 		error = zvol_dumpify(zv);
1765 		zfs_range_unlock(rl);
1766 		break;
1767 
1768 	case DKIOCDUMPFINI:
1769 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1770 			break;
1771 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1772 		    RL_WRITER);
1773 		error = zvol_dump_fini(zv);
1774 		zfs_range_unlock(rl);
1775 		break;
1776 
1777 	case DKIOCFREE:
1778 	{
1779 		dkioc_free_t df;
1780 		dmu_tx_t *tx;
1781 
1782 		if (!zvol_unmap_enabled)
1783 			break;
1784 
1785 		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
1786 			error = SET_ERROR(EFAULT);
1787 			break;
1788 		}
1789 
1790 		/*
1791 		 * Apply Postel's Law to length-checking.  If they overshoot,
1792 		 * just blank out until the end, if there's a need to blank
1793 		 * out anything.
1794 		 */
1795 		if (df.df_start >= zv->zv_volsize)
1796 			break;	/* No need to do anything... */
1797 
1798 		mutex_exit(&zfsdev_state_lock);
1799 
1800 		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
1801 		    RL_WRITER);
1802 		tx = dmu_tx_create(zv->zv_objset);
1803 		dmu_tx_mark_netfree(tx);
1804 		error = dmu_tx_assign(tx, TXG_WAIT);
1805 		if (error != 0) {
1806 			dmu_tx_abort(tx);
1807 		} else {
1808 			zvol_log_truncate(zv, tx, df.df_start,
1809 			    df.df_length, B_TRUE);
1810 			dmu_tx_commit(tx);
1811 			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1812 			    df.df_start, df.df_length);
1813 		}
1814 
1815 		zfs_range_unlock(rl);
1816 
1817 		/*
1818 		 * If the write cache is disabled, the 'sync' property is
1819 		 * set to 'always', or the caller asked for a synchronous
1820 		 * free, commit this operation to the ZIL.  This also syncs
1821 		 * any previously uncommitted writes to the zvol object.
1822 		 * This behavior is gated by the zvol_unmap_sync_enabled
1823 		 * tunable.
1824 		 */
1825 		if ((error == 0) && zvol_unmap_sync_enabled &&
1826 		    (!(zv->zv_flags & ZVOL_WCE) ||
1827 		    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
1828 		    (df.df_flags & DF_WAIT_SYNC))) {
1829 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1830 		}
1831 
1832 		return (error);
1833 	}
1834 
1835 	default:
1836 		error = SET_ERROR(ENOTTY);
1837 		break;
1838 
1839 	}
1840 	mutex_exit(&zfsdev_state_lock);
1841 	return (error);
1842 }
1843 
1844 int
1845 zvol_busy(void)
1846 {
1847 	return (zvol_minors != 0);
1848 }
1849 
1850 void
1851 zvol_init(void)
1852 {
1853 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1854 	    1) == 0);
1855 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1856 }
1857 
1858 void
1859 zvol_fini(void)
1860 {
1861 	mutex_destroy(&zfsdev_state_lock);
1862 	ddi_soft_state_fini(&zfsdev_state);
1863 }
1864 
1865 /*ARGSUSED*/
1866 static int
1867 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1868 {
1869 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1870 
1871 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1872 		return (1);
1873 	return (0);
1874 }
1875 
1876 /*ARGSUSED*/
1877 static void
1878 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1879 {
1880 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1881 
1882 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1883 }
1884 
1885 static int
1886 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1887 {
1888 	dmu_tx_t *tx;
1889 	int error;
1890 	objset_t *os = zv->zv_objset;
1891 	spa_t *spa = dmu_objset_spa(os);
1892 	vdev_t *vd = spa->spa_root_vdev;
1893 	nvlist_t *nv = NULL;
1894 	uint64_t version = spa_version(spa);
1895 	uint64_t checksum, compress, refresrv, vbs, dedup;
1896 
1897 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1898 	ASSERT(vd->vdev_ops == &vdev_root_ops);
1899 
1900 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1901 	    DMU_OBJECT_END);
1902 	if (error != 0)
1903 		return (error);
1904 	/* wait for dmu_free_long_range to actually free the blocks */
1905 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1906 
1907 	/*
1908 	 * If the pool on which the dump device is being initialized has more
1909 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1910 	 * enabled.  If so, bump that feature's counter to indicate that the
1911 	 * feature is active. We also check the vdev type to handle the
1912 	 * following case:
1913 	 *   # zpool create test raidz disk1 disk2 disk3
1914 	 *   Now spa_root_vdev->vdev_children == 1 (the raidz vdev), but
1915 	 *   the raidz vdev itself has 3 children.
1916 	 */
1917 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1918 		if (!spa_feature_is_enabled(spa,
1919 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1920 			return (SET_ERROR(ENOTSUP));
1921 		(void) dsl_sync_task(spa_name(spa),
1922 		    zfs_mvdev_dump_feature_check,
1923 		    zfs_mvdev_dump_activate_feature_sync, NULL,
1924 		    2, ZFS_SPACE_CHECK_RESERVED);
1925 	}
1926 
1927 	if (!resize) {
1928 		error = dsl_prop_get_integer(zv->zv_name,
1929 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1930 		if (error == 0) {
1931 			error = dsl_prop_get_integer(zv->zv_name,
1932 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1933 			    NULL);
1934 		}
1935 		if (error == 0) {
1936 			error = dsl_prop_get_integer(zv->zv_name,
1937 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
1938 			    &refresrv, NULL);
1939 		}
1940 		if (error == 0) {
1941 			error = dsl_prop_get_integer(zv->zv_name,
1942 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
1943 			    NULL);
1944 		}
1945 		if (version >= SPA_VERSION_DEDUP && error == 0) {
1946 			error = dsl_prop_get_integer(zv->zv_name,
1947 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1948 		}
1949 	}
1950 	if (error != 0)
1951 		return (error);
1952 
1953 	tx = dmu_tx_create(os);
1954 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1955 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1956 	error = dmu_tx_assign(tx, TXG_WAIT);
1957 	if (error != 0) {
1958 		dmu_tx_abort(tx);
1959 		return (error);
1960 	}
1961 
1962 	/*
1963 	 * If we are resizing the dump device then we only need to
1964 	 * update the refreservation to match the newly updated
1965 	 * zvolsize. Otherwise, we save off the original state of the
1966 	 * zvol so that we can restore them if the zvol is ever undumpified.
1967 	 */
1968 	if (resize) {
1969 		error = zap_update(os, ZVOL_ZAP_OBJ,
1970 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1971 		    &zv->zv_volsize, tx);
1972 	} else {
1973 		error = zap_update(os, ZVOL_ZAP_OBJ,
1974 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1975 		    &compress, tx);
1976 		if (error == 0) {
1977 			error = zap_update(os, ZVOL_ZAP_OBJ,
1978 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
1979 			    &checksum, tx);
1980 		}
1981 		if (error == 0) {
1982 			error = zap_update(os, ZVOL_ZAP_OBJ,
1983 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1984 			    &refresrv, tx);
1985 		}
1986 		if (error == 0) {
1987 			error = zap_update(os, ZVOL_ZAP_OBJ,
1988 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
1989 			    &vbs, tx);
1990 		}
1991 		if (error == 0) {
1992 			error = dmu_object_set_blocksize(
1993 			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
1994 		}
1995 		if (version >= SPA_VERSION_DEDUP && error == 0) {
1996 			error = zap_update(os, ZVOL_ZAP_OBJ,
1997 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
1998 			    &dedup, tx);
1999 		}
2000 		if (error == 0)
2001 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2002 	}
2003 	dmu_tx_commit(tx);
2004 
2005 	/*
2006 	 * We only need update the zvol's property if we are initializing
2007 	 * the dump area for the first time.
2008 	 */
2009 	if (error == 0 && !resize) {
2010 		/*
2011 		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2012 		 * function.  Otherwise, use the old default -- OFF.
2013 		 */
2014 		checksum = spa_feature_is_active(spa,
2015 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2016 		    ZIO_CHECKSUM_OFF;
2017 
2018 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2019 		VERIFY(nvlist_add_uint64(nv,
2020 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2021 		VERIFY(nvlist_add_uint64(nv,
2022 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2023 		    ZIO_COMPRESS_OFF) == 0);
2024 		VERIFY(nvlist_add_uint64(nv,
2025 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2026 		    checksum) == 0);
2027 		if (version >= SPA_VERSION_DEDUP) {
2028 			VERIFY(nvlist_add_uint64(nv,
2029 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2030 			    ZIO_CHECKSUM_OFF) == 0);
2031 		}
2032 
2033 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2034 		    nv, NULL);
2035 		nvlist_free(nv);
2036 	}
2037 
2038 	/* Allocate the space for the dump */
2039 	if (error == 0)
2040 		error = zvol_prealloc(zv);
2041 	return (error);
2042 }
2043 
2044 static int
2045 zvol_dumpify(zvol_state_t *zv)
2046 {
2047 	int error = 0;
2048 	uint64_t dumpsize = 0;
2049 	dmu_tx_t *tx;
2050 	objset_t *os = zv->zv_objset;
2051 
2052 	if (zv->zv_flags & ZVOL_RDONLY)
2053 		return (SET_ERROR(EROFS));
2054 
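	/*
	 * (Re)initialize the dump area when no dump size has been
	 * recorded or the recorded size no longer matches the current
	 * volsize; a non-zero recorded size means we are resizing an
	 * existing dump area.
	 */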
2055 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2056 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2057 		boolean_t resize = (dumpsize > 0);
2058 
2059 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2060 			(void) zvol_dump_fini(zv);
2061 			return (error);
2062 		}
2063 	}
2064 
2065 	/*
2066 	 * Build up our lba mapping.
2067 	 */
2068 	error = zvol_get_lbas(zv);
2069 	if (error) {
2070 		(void) zvol_dump_fini(zv);
2071 		return (error);
2072 	}
2073 
2074 	tx = dmu_tx_create(os);
2075 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2076 	error = dmu_tx_assign(tx, TXG_WAIT);
2077 	if (error) {
2078 		dmu_tx_abort(tx);
2079 		(void) zvol_dump_fini(zv);
2080 		return (error);
2081 	}
2082 
2083 	zv->zv_flags |= ZVOL_DUMPIFIED;
2084 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2085 	    &zv->zv_volsize, tx);
2086 	dmu_tx_commit(tx);
2087 
2088 	if (error) {
2089 		(void) zvol_dump_fini(zv);
2090 		return (error);
2091 	}
2092 
2093 	txg_wait_synced(dmu_objset_pool(os), 0);
2094 	return (0);
2095 }
2096 
2097 static int
2098 zvol_dump_fini(zvol_state_t *zv)
2099 {
2100 	dmu_tx_t *tx;
2101 	objset_t *os = zv->zv_objset;
2102 	nvlist_t *nv;
2103 	int error = 0;
2104 	uint64_t checksum, compress, refresrv, vbs, dedup;
2105 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2106 
2107 	/*
2108 	 * Attempt to restore the zvol back to its pre-dumpified state.
2109 	 * This is a best-effort attempt as it's possible that not all
2110 	 * of these properties were initialized during the dumpify process
2111 	 * (i.e. error during zvol_dump_init).
2112 	 */
2113 
2114 	tx = dmu_tx_create(os);
2115 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2116 	error = dmu_tx_assign(tx, TXG_WAIT);
2117 	if (error) {
2118 		dmu_tx_abort(tx);
2119 		return (error);
2120 	}
2121 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2122 	dmu_tx_commit(tx);
2123 
2124 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2125 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2126 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2127 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2128 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2129 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2130 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2131 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2132 
2133 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2134 	(void) nvlist_add_uint64(nv,
2135 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2136 	(void) nvlist_add_uint64(nv,
2137 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2138 	(void) nvlist_add_uint64(nv,
2139 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2140 	if (version >= SPA_VERSION_DEDUP &&
2141 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2142 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2143 		(void) nvlist_add_uint64(nv,
2144 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2145 	}
2146 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2147 	    nv, NULL);
2148 	nvlist_free(nv);
2149 
2150 	zvol_free_extents(zv);
2151 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2152 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2153 	/* wait for dmu_free_long_range to actually free the blocks */
2154 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2155 	tx = dmu_tx_create(os);
2156 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2157 	error = dmu_tx_assign(tx, TXG_WAIT);
2158 	if (error) {
2159 		dmu_tx_abort(tx);
2160 		return (error);
2161 	}
2162 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2163 		zv->zv_volblocksize = vbs;
2164 	dmu_tx_commit(tx);
2165 
2166 	return (0);
2167 }
2168