xref: /illumos-gate/usr/src/uts/common/fs/zfs/zvol.c (revision 25df42a1)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
22f80ce222SChris Kirby  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23b77b9231SDan McDonald  *
24b77b9231SDan McDonald  * Portions Copyright 2010 Robert Milkowski
25b77b9231SDan McDonald  *
26047c81d3SSaso Kiselkov  * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
27*25df42a1SMatthew Ahrens  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
28c3d26abcSMatthew Ahrens  * Copyright (c) 2014 Integros [integros.com]
29455e370cSJohn Levon  * Copyright 2019 Joyent, Inc.
30fa9e4066Sahrens  */
31fa9e4066Sahrens 
32fa9e4066Sahrens /*
33fa9e4066Sahrens  * ZFS volume emulation driver.
34fa9e4066Sahrens  *
35fa9e4066Sahrens  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
36fa9e4066Sahrens  * Volumes are accessed through the symbolic links named:
37fa9e4066Sahrens  *
38fa9e4066Sahrens  * /dev/zvol/dsk/<pool_name>/<dataset_name>
39fa9e4066Sahrens  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
40fa9e4066Sahrens  *
41681d9761SEric Taylor  * These links are created by the /dev filesystem (sdev_zvolops.c).
42fa9e4066Sahrens  * Volumes are persistent through reboot.  No user command needs to be
43fa9e4066Sahrens  * run before opening and using a device.
44fa9e4066Sahrens  */
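/*
 * Illustrative sketch (hypothetical consumer code, not part of this driver):
 * a userland program reaches a zvol through the /dev/zvol links described
 * above.  The pool/volume name "tank/vol" is made up and error handling is
 * kept to a minimum.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
read_first_sector(char *buf, size_t len)
{
	/* raw (character) device; use /dev/zvol/dsk/... for the block device */
	int fd = open("/dev/zvol/rdsk/tank/vol", O_RDONLY);
	ssize_t n;

	if (fd == -1)
		return (-1);
	n = pread(fd, buf, len, 0);
	(void) close(fd);
	return (n == (ssize_t)len ? 0 : -1);
}
#endif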
45fa9e4066Sahrens 
46fa9e4066Sahrens #include <sys/types.h>
47fa9e4066Sahrens #include <sys/param.h>
48fa9e4066Sahrens #include <sys/errno.h>
49fa9e4066Sahrens #include <sys/uio.h>
50fa9e4066Sahrens #include <sys/buf.h>
51fa9e4066Sahrens #include <sys/modctl.h>
52fa9e4066Sahrens #include <sys/open.h>
53fa9e4066Sahrens #include <sys/kmem.h>
54fa9e4066Sahrens #include <sys/conf.h>
55fa9e4066Sahrens #include <sys/cmn_err.h>
56fa9e4066Sahrens #include <sys/stat.h>
57fa9e4066Sahrens #include <sys/zap.h>
58fa9e4066Sahrens #include <sys/spa.h>
59810e43b2SBill Pijewski #include <sys/spa_impl.h>
60fa9e4066Sahrens #include <sys/zio.h>
61e7cbe64fSgw #include <sys/dmu_traverse.h>
62e7cbe64fSgw #include <sys/dnode.h>
63e7cbe64fSgw #include <sys/dsl_dataset.h>
64fa9e4066Sahrens #include <sys/dsl_prop.h>
65fa9e4066Sahrens #include <sys/dkio.h>
66fa9e4066Sahrens #include <sys/efi_partition.h>
67fa9e4066Sahrens #include <sys/byteorder.h>
68fa9e4066Sahrens #include <sys/pathname.h>
69fa9e4066Sahrens #include <sys/ddi.h>
70fa9e4066Sahrens #include <sys/sunddi.h>
71fa9e4066Sahrens #include <sys/crc32.h>
72fa9e4066Sahrens #include <sys/dirent.h>
73fa9e4066Sahrens #include <sys/policy.h>
74fa9e4066Sahrens #include <sys/fs/zfs.h>
75fa9e4066Sahrens #include <sys/zfs_ioctl.h>
76fa9e4066Sahrens #include <sys/mkdev.h>
7722ac5be4Sperrin #include <sys/zil.h>
78c5c6ffa0Smaybee #include <sys/refcount.h>
79c2e6a7d6Sperrin #include <sys/zfs_znode.h>
80c2e6a7d6Sperrin #include <sys/zfs_rlock.h>
81e7cbe64fSgw #include <sys/vdev_impl.h>
82e7cbe64fSgw #include <sys/zvol.h>
83e7cbe64fSgw #include <sys/dumphdr.h>
841209a471SNeil Perrin #include <sys/zil_impl.h>
8580901aeaSGeorge Wilson #include <sys/dbuf.h>
86810e43b2SBill Pijewski #include <sys/dmu_tx.h>
87810e43b2SBill Pijewski #include <sys/zfeature.h>
88810e43b2SBill Pijewski #include <sys/zio_checksum.h>
90c3377ee9SJohn Levon #include <sys/smt.h>
91047c81d3SSaso Kiselkov #include <sys/dkioc_free_util.h>
93fa9e4066Sahrens 
94fa9e4066Sahrens #include "zfs_namecheck.h"
95fa9e4066Sahrens 
96c99e4bdcSChris Kirby void *zfsdev_state;
97503ad85cSMatthew Ahrens static char *zvol_tag = "zvol_tag";
98fa9e4066Sahrens 
99e7cbe64fSgw #define	ZVOL_DUMPSIZE		"dumpsize"
100e7cbe64fSgw 
101fa9e4066Sahrens /*
102c99e4bdcSChris Kirby  * This lock protects the zfsdev_state structure from being modified
103fa9e4066Sahrens  * while it's being used, e.g. an open that comes in before a create
104fa9e4066Sahrens  * finishes.  It also protects temporary opens of the dataset so that,
105fa9e4066Sahrens  * e.g., an open doesn't get a spurious EBUSY.
106fa9e4066Sahrens  */
107c99e4bdcSChris Kirby kmutex_t zfsdev_state_lock;
108fa9e4066Sahrens static uint32_t zvol_minors;
109fa9e4066Sahrens 
110e7cbe64fSgw typedef struct zvol_extent {
11188b7b0f2SMatthew Ahrens 	list_node_t	ze_node;
112e7cbe64fSgw 	dva_t		ze_dva;		/* dva associated with this extent */
11388b7b0f2SMatthew Ahrens 	uint64_t	ze_nblks;	/* number of blocks in extent */
114e7cbe64fSgw } zvol_extent_t;
115e7cbe64fSgw 
116fa9e4066Sahrens /*
117fa9e4066Sahrens  * The in-core state of each volume.
118fa9e4066Sahrens  */
119fa9e4066Sahrens typedef struct zvol_state {
120fa9e4066Sahrens 	char		zv_name[MAXPATHLEN]; /* pool/dd name */
121fa9e4066Sahrens 	uint64_t	zv_volsize;	/* amount of space we advertise */
12267bd71c6Sperrin 	uint64_t	zv_volblocksize; /* volume block size */
123fa9e4066Sahrens 	minor_t		zv_minor;	/* minor number */
124fa9e4066Sahrens 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
125701f66c4SEric Taylor 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
126fa9e4066Sahrens 	objset_t	*zv_objset;	/* objset handle */
127fa9e4066Sahrens 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
128fa9e4066Sahrens 	uint32_t	zv_total_opens;	/* total open count */
12922ac5be4Sperrin 	zilog_t		*zv_zilog;	/* ZIL handle */
13088b7b0f2SMatthew Ahrens 	list_t		zv_extents;	/* List of extents for dump */
13179315247SMatthew Ahrens 	rangelock_t	zv_rangelock;
1328dfe5547SRichard Yao 	dnode_t		*zv_dn;		/* dnode hold */
133fa9e4066Sahrens } zvol_state_t;
134fa9e4066Sahrens 
135e7cbe64fSgw /*
136e7cbe64fSgw  * zvol specific flags
137e7cbe64fSgw  */
138e7cbe64fSgw #define	ZVOL_RDONLY	0x1
139e7cbe64fSgw #define	ZVOL_DUMPIFIED	0x2
140c7f714e2SEric Taylor #define	ZVOL_EXCL	0x4
141701f66c4SEric Taylor #define	ZVOL_WCE	0x8
142e7cbe64fSgw 
14367bd71c6Sperrin /*
14467bd71c6Sperrin  * zvol maximum transfer in one DMU tx.
14567bd71c6Sperrin  */
14667bd71c6Sperrin int zvol_maxphys = DMU_MAX_ACCESS/2;
14767bd71c6Sperrin 
148893c83baSGeorge Wilson /*
149893c83baSGeorge Wilson  * Toggle unmap functionality.
150893c83baSGeorge Wilson  */
151893c83baSGeorge Wilson boolean_t zvol_unmap_enabled = B_TRUE;
152893c83baSGeorge Wilson 
1531c9272b8SStephen Blinick /*
1541c9272b8SStephen Blinick  * If true, unmaps requested as synchronous are executed synchronously,
1551c9272b8SStephen Blinick  * otherwise all unmaps are asynchronous.
1561c9272b8SStephen Blinick  */
1571c9272b8SStephen Blinick boolean_t zvol_unmap_sync_enabled = B_FALSE;
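/*
 * Note: assuming the usual illumos build, where this file is part of the
 * "zfs" kernel module, tunables such as the two above can typically be
 * overridden at boot time from /etc/system, e.g.:
 *
 *	set zfs:zvol_unmap_enabled = 0
 *	set zfs:zvol_unmap_sync_enabled = 1
 */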
1581c9272b8SStephen Blinick 
15992241e0bSTom Erickson extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
1604445fffbSMatthew Ahrens     nvlist_t *, nvlist_t *);
161681d9761SEric Taylor static int zvol_remove_zv(zvol_state_t *);
1621271e4b1SPrakash Surya static int zvol_get_data(void *arg, lr_write_t *lr, char *buf,
1631271e4b1SPrakash Surya     struct lwb *lwb, zio_t *zio);
164e7cbe64fSgw static int zvol_dumpify(zvol_state_t *zv);
165e7cbe64fSgw static int zvol_dump_fini(zvol_state_t *zv);
166e7cbe64fSgw static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
16767bd71c6Sperrin 
168fa9e4066Sahrens static void
169c61ea566SGeorge Wilson zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
170fa9e4066Sahrens {
171c61ea566SGeorge Wilson 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
172fa9e4066Sahrens 
173c61ea566SGeorge Wilson 	zv->zv_volsize = volsize;
174fa9e4066Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
175681d9761SEric Taylor 	    "Size", volsize) == DDI_SUCCESS);
176fa9e4066Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
177681d9761SEric Taylor 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
178e7cbe64fSgw 
179e7cbe64fSgw 	/* Notify specfs to invalidate the cached size */
180e7cbe64fSgw 	spec_size_invalidate(dev, VBLK);
181e7cbe64fSgw 	spec_size_invalidate(dev, VCHR);
182fa9e4066Sahrens }
183fa9e4066Sahrens 
184fa9e4066Sahrens int
185e9dbad6fSeschrock zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
186fa9e4066Sahrens {
187e9dbad6fSeschrock 	if (volsize == 0)
188be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
189fa9e4066Sahrens 
190e9dbad6fSeschrock 	if (volsize % blocksize != 0)
191be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
1925c5460e9Seschrock 
193fa9e4066Sahrens #ifdef _ILP32
194e9dbad6fSeschrock 	if (volsize - 1 > SPEC_MAXOFFSET_T)
195be6fd75aSMatthew Ahrens 		return (SET_ERROR(EOVERFLOW));
196fa9e4066Sahrens #endif
197fa9e4066Sahrens 	return (0);
198fa9e4066Sahrens }
199fa9e4066Sahrens 
200fa9e4066Sahrens int
201e9dbad6fSeschrock zvol_check_volblocksize(uint64_t volblocksize)
202fa9e4066Sahrens {
203e9dbad6fSeschrock 	if (volblocksize < SPA_MINBLOCKSIZE ||
204b5152584SMatthew Ahrens 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
205e9dbad6fSeschrock 	    !ISP2(volblocksize))
206be6fd75aSMatthew Ahrens 		return (SET_ERROR(EDOM));
207fa9e4066Sahrens 
208fa9e4066Sahrens 	return (0);
209fa9e4066Sahrens }
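/*
 * Worked example of the two checks above (hypothetical values, expressed
 * as debug assertions rather than driver code):
 */
#if 0
static void
zvol_check_examples(void)
{
	/* 8K is a power of two within [SPA_MINBLOCKSIZE, SPA_OLD_MAXBLOCKSIZE] */
	ASSERT0(zvol_check_volblocksize(8192));
	/* 12K is in range but not a power of two */
	ASSERT3U(zvol_check_volblocksize(12288), ==, EDOM);
	/* a 1 GB volume is a multiple of an 8K block size */
	ASSERT0(zvol_check_volsize(1ULL << 30, 8192));
	/* 1 GB plus 512 bytes is not a multiple of 8K */
	ASSERT3U(zvol_check_volsize((1ULL << 30) + 512, 8192), ==, EINVAL);
}
#endif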
210fa9e4066Sahrens 
211fa9e4066Sahrens int
212a2eea2e1Sahrens zvol_get_stats(objset_t *os, nvlist_t *nv)
213fa9e4066Sahrens {
214fa9e4066Sahrens 	int error;
215fa9e4066Sahrens 	dmu_object_info_t doi;
216a2eea2e1Sahrens 	uint64_t val;
217fa9e4066Sahrens 
218a2eea2e1Sahrens 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
219fa9e4066Sahrens 	if (error)
220fa9e4066Sahrens 		return (error);
221fa9e4066Sahrens 
222a2eea2e1Sahrens 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
223a2eea2e1Sahrens 
224fa9e4066Sahrens 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
225fa9e4066Sahrens 
226a2eea2e1Sahrens 	if (error == 0) {
227a2eea2e1Sahrens 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
228a2eea2e1Sahrens 		    doi.doi_data_block_size);
229a2eea2e1Sahrens 	}
230fa9e4066Sahrens 
231fa9e4066Sahrens 	return (error);
232fa9e4066Sahrens }
233fa9e4066Sahrens 
234fa9e4066Sahrens static zvol_state_t *
235e9dbad6fSeschrock zvol_minor_lookup(const char *name)
236fa9e4066Sahrens {
237fa9e4066Sahrens 	minor_t minor;
238fa9e4066Sahrens 	zvol_state_t *zv;
239fa9e4066Sahrens 
240c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
241fa9e4066Sahrens 
242c99e4bdcSChris Kirby 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
243c99e4bdcSChris Kirby 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
244fa9e4066Sahrens 		if (zv == NULL)
245fa9e4066Sahrens 			continue;
246fa9e4066Sahrens 		if (strcmp(zv->zv_name, name) == 0)
247f80ce222SChris Kirby 			return (zv);
248fa9e4066Sahrens 	}
249fa9e4066Sahrens 
250f80ce222SChris Kirby 	return (NULL);
251fa9e4066Sahrens }
252fa9e4066Sahrens 
253e7cbe64fSgw /* extent mapping arg */
254e7cbe64fSgw struct maparg {
25588b7b0f2SMatthew Ahrens 	zvol_state_t	*ma_zv;
25688b7b0f2SMatthew Ahrens 	uint64_t	ma_blks;
257e7cbe64fSgw };
258e7cbe64fSgw 
259e7cbe64fSgw /*ARGSUSED*/
260e7cbe64fSgw static int
2611b912ec7SGeorge Wilson zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2627802d7bfSMatthew Ahrens     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
263e7cbe64fSgw {
26488b7b0f2SMatthew Ahrens 	struct maparg *ma = arg;
26588b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
26688b7b0f2SMatthew Ahrens 	int bs = ma->ma_zv->zv_volblocksize;
267e7cbe64fSgw 
268a2cdcdd2SPaul Dagnelie 	if (bp == NULL || BP_IS_HOLE(bp) ||
26943466aaeSMax Grossman 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
27088b7b0f2SMatthew Ahrens 		return (0);
271e7cbe64fSgw 
2725d7b4d43SMatthew Ahrens 	VERIFY(!BP_IS_EMBEDDED(bp));
2735d7b4d43SMatthew Ahrens 
27488b7b0f2SMatthew Ahrens 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
27588b7b0f2SMatthew Ahrens 	ma->ma_blks++;
276e7cbe64fSgw 
27788b7b0f2SMatthew Ahrens 	/* Abort immediately if we have encountered gang blocks */
27888b7b0f2SMatthew Ahrens 	if (BP_IS_GANG(bp))
279be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFRAGS));
280e7cbe64fSgw 
28188b7b0f2SMatthew Ahrens 	/*
28288b7b0f2SMatthew Ahrens 	 * See if the block is at the end of the previous extent.
28388b7b0f2SMatthew Ahrens 	 */
28488b7b0f2SMatthew Ahrens 	ze = list_tail(&ma->ma_zv->zv_extents);
28588b7b0f2SMatthew Ahrens 	if (ze &&
28688b7b0f2SMatthew Ahrens 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
28788b7b0f2SMatthew Ahrens 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
28888b7b0f2SMatthew Ahrens 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
28988b7b0f2SMatthew Ahrens 		ze->ze_nblks++;
29088b7b0f2SMatthew Ahrens 		return (0);
291e7cbe64fSgw 	}
292e7cbe64fSgw 
29388b7b0f2SMatthew Ahrens 	dprintf_bp(bp, "%s", "next blkptr:");
294e7cbe64fSgw 
29588b7b0f2SMatthew Ahrens 	/* start a new extent */
29688b7b0f2SMatthew Ahrens 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
29788b7b0f2SMatthew Ahrens 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
29888b7b0f2SMatthew Ahrens 	ze->ze_nblks = 1;
29988b7b0f2SMatthew Ahrens 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
30088b7b0f2SMatthew Ahrens 	return (0);
30188b7b0f2SMatthew Ahrens }
302e7cbe64fSgw 
30388b7b0f2SMatthew Ahrens static void
30488b7b0f2SMatthew Ahrens zvol_free_extents(zvol_state_t *zv)
30588b7b0f2SMatthew Ahrens {
30688b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
307e7cbe64fSgw 
30888b7b0f2SMatthew Ahrens 	while ((ze = list_head(&zv->zv_extents)) != NULL) {
30988b7b0f2SMatthew Ahrens 		list_remove(&zv->zv_extents, ze);
31088b7b0f2SMatthew Ahrens 		kmem_free(ze, sizeof (zvol_extent_t));
311e7cbe64fSgw 	}
31288b7b0f2SMatthew Ahrens }
313e7cbe64fSgw 
31488b7b0f2SMatthew Ahrens static int
31588b7b0f2SMatthew Ahrens zvol_get_lbas(zvol_state_t *zv)
31688b7b0f2SMatthew Ahrens {
3173adc9019SEric Taylor 	objset_t *os = zv->zv_objset;
31888b7b0f2SMatthew Ahrens 	struct maparg	ma;
31988b7b0f2SMatthew Ahrens 	int		err;
32088b7b0f2SMatthew Ahrens 
32188b7b0f2SMatthew Ahrens 	ma.ma_zv = zv;
32288b7b0f2SMatthew Ahrens 	ma.ma_blks = 0;
32388b7b0f2SMatthew Ahrens 	zvol_free_extents(zv);
32488b7b0f2SMatthew Ahrens 
3253adc9019SEric Taylor 	/* commit any in-flight changes before traversing the dataset */
3263adc9019SEric Taylor 	txg_wait_synced(dmu_objset_pool(os), 0);
3273adc9019SEric Taylor 	err = traverse_dataset(dmu_objset_ds(os), 0,
32888b7b0f2SMatthew Ahrens 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
32988b7b0f2SMatthew Ahrens 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
33088b7b0f2SMatthew Ahrens 		zvol_free_extents(zv);
33188b7b0f2SMatthew Ahrens 		return (err ? err : EIO);
332e7cbe64fSgw 	}
33388b7b0f2SMatthew Ahrens 
334e7cbe64fSgw 	return (0);
335e7cbe64fSgw }
336e7cbe64fSgw 
337ecd6cf80Smarks /* ARGSUSED */
338fa9e4066Sahrens void
339ecd6cf80Smarks zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
340fa9e4066Sahrens {
341da6c28aaSamw 	zfs_creat_t *zct = arg;
342da6c28aaSamw 	nvlist_t *nvprops = zct->zct_props;
343fa9e4066Sahrens 	int error;
344e9dbad6fSeschrock 	uint64_t volblocksize, volsize;
345fa9e4066Sahrens 
346ecd6cf80Smarks 	VERIFY(nvlist_lookup_uint64(nvprops,
347e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
348ecd6cf80Smarks 	if (nvlist_lookup_uint64(nvprops,
349e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
350e9dbad6fSeschrock 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
351e9dbad6fSeschrock 
352e9dbad6fSeschrock 	/*
353e7cbe64fSgw 	 * These properties must be removed from the list so the generic
354e9dbad6fSeschrock 	 * property setting step won't apply to them.
355e9dbad6fSeschrock 	 */
356ecd6cf80Smarks 	VERIFY(nvlist_remove_all(nvprops,
357e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
358ecd6cf80Smarks 	(void) nvlist_remove_all(nvprops,
359e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
360e9dbad6fSeschrock 
361e9dbad6fSeschrock 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
362fa9e4066Sahrens 	    DMU_OT_NONE, 0, tx);
363fa9e4066Sahrens 	ASSERT(error == 0);
364fa9e4066Sahrens 
365fa9e4066Sahrens 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
366fa9e4066Sahrens 	    DMU_OT_NONE, 0, tx);
367fa9e4066Sahrens 	ASSERT(error == 0);
368fa9e4066Sahrens 
369e9dbad6fSeschrock 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
370fa9e4066Sahrens 	ASSERT(error == 0);
371fa9e4066Sahrens }
372fa9e4066Sahrens 
373b77b9231SDan McDonald /*
374b77b9231SDan McDonald  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
375b77b9231SDan McDonald  * implement DKIOCFREE/free-long-range.
376b77b9231SDan McDonald  */
377b77b9231SDan McDonald static int
3783f7978d0SAlan Somers zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
379b77b9231SDan McDonald {
3803f7978d0SAlan Somers 	zvol_state_t *zv = arg1;
3813f7978d0SAlan Somers 	lr_truncate_t *lr = arg2;
382b77b9231SDan McDonald 	uint64_t offset, length;
383b77b9231SDan McDonald 
384b77b9231SDan McDonald 	if (byteswap)
385b77b9231SDan McDonald 		byteswap_uint64_array(lr, sizeof (*lr));
386b77b9231SDan McDonald 
387b77b9231SDan McDonald 	offset = lr->lr_offset;
388b77b9231SDan McDonald 	length = lr->lr_length;
389b77b9231SDan McDonald 
390b77b9231SDan McDonald 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
391b77b9231SDan McDonald }
392b77b9231SDan McDonald 
39322ac5be4Sperrin /*
39422ac5be4Sperrin  * Replay a TX_WRITE ZIL transaction that didn't get committed
39522ac5be4Sperrin  * after a system failure
39622ac5be4Sperrin  */
397eb633035STom Caputi /* ARGSUSED */
39822ac5be4Sperrin static int
3993f7978d0SAlan Somers zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
40022ac5be4Sperrin {
4013f7978d0SAlan Somers 	zvol_state_t *zv = arg1;
4023f7978d0SAlan Somers 	lr_write_t *lr = arg2;
40322ac5be4Sperrin 	objset_t *os = zv->zv_objset;
40422ac5be4Sperrin 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
405b24ab676SJeff Bonwick 	uint64_t offset, length;
40622ac5be4Sperrin 	dmu_tx_t *tx;
40722ac5be4Sperrin 	int error;
40822ac5be4Sperrin 
40922ac5be4Sperrin 	if (byteswap)
41022ac5be4Sperrin 		byteswap_uint64_array(lr, sizeof (*lr));
41122ac5be4Sperrin 
412b24ab676SJeff Bonwick 	offset = lr->lr_offset;
413b24ab676SJeff Bonwick 	length = lr->lr_length;
414b24ab676SJeff Bonwick 
415b24ab676SJeff Bonwick 	/* If it's a dmu_sync() block, write the whole block */
416b24ab676SJeff Bonwick 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
417b24ab676SJeff Bonwick 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
418b24ab676SJeff Bonwick 		if (length < blocksize) {
419b24ab676SJeff Bonwick 			offset -= offset % blocksize;
420b24ab676SJeff Bonwick 			length = blocksize;
421b24ab676SJeff Bonwick 		}
422b24ab676SJeff Bonwick 	}
423975c32a0SNeil Perrin 
42422ac5be4Sperrin 	tx = dmu_tx_create(os);
425b24ab676SJeff Bonwick 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
4261209a471SNeil Perrin 	error = dmu_tx_assign(tx, TXG_WAIT);
42722ac5be4Sperrin 	if (error) {
42822ac5be4Sperrin 		dmu_tx_abort(tx);
42922ac5be4Sperrin 	} else {
430b24ab676SJeff Bonwick 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
43122ac5be4Sperrin 		dmu_tx_commit(tx);
43222ac5be4Sperrin 	}
43322ac5be4Sperrin 
43422ac5be4Sperrin 	return (error);
43522ac5be4Sperrin }
43622ac5be4Sperrin 
43722ac5be4Sperrin /* ARGSUSED */
43822ac5be4Sperrin static int
4393f7978d0SAlan Somers zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
44022ac5be4Sperrin {
441be6fd75aSMatthew Ahrens 	return (SET_ERROR(ENOTSUP));
44222ac5be4Sperrin }
44322ac5be4Sperrin 
44422ac5be4Sperrin /*
44522ac5be4Sperrin  * Callback vectors for replaying records.
446b77b9231SDan McDonald  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
44722ac5be4Sperrin  */
44822ac5be4Sperrin zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
44922ac5be4Sperrin 	zvol_replay_err,	/* 0 no such transaction type */
45022ac5be4Sperrin 	zvol_replay_err,	/* TX_CREATE */
45122ac5be4Sperrin 	zvol_replay_err,	/* TX_MKDIR */
45222ac5be4Sperrin 	zvol_replay_err,	/* TX_MKXATTR */
45322ac5be4Sperrin 	zvol_replay_err,	/* TX_SYMLINK */
45422ac5be4Sperrin 	zvol_replay_err,	/* TX_REMOVE */
45522ac5be4Sperrin 	zvol_replay_err,	/* TX_RMDIR */
45622ac5be4Sperrin 	zvol_replay_err,	/* TX_LINK */
45722ac5be4Sperrin 	zvol_replay_err,	/* TX_RENAME */
45822ac5be4Sperrin 	zvol_replay_write,	/* TX_WRITE */
459b77b9231SDan McDonald 	zvol_replay_truncate,	/* TX_TRUNCATE */
46022ac5be4Sperrin 	zvol_replay_err,	/* TX_SETATTR */
46122ac5be4Sperrin 	zvol_replay_err,	/* TX_ACL */
462975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ACL */
463975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ATTR */
464975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
465975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ACL */
466975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ATTR */
467975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
468975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_WRITE2 */
46922ac5be4Sperrin };
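/*
 * Conceptual sketch of how the table above is consumed (the real dispatch
 * lives in zil_replay(), not in this file): each replayed log record is
 * routed by its transaction type, so only TX_WRITE and TX_TRUNCATE do real
 * work for zvols and every other type returns ENOTSUP.
 */
#if 0
static int
zvol_replay_one(zvol_state_t *zv, lr_t *lrc, boolean_t byteswap)
{
	uint64_t txtype = lrc->lrc_txtype;

	if (txtype == 0 || txtype >= TX_MAX_TYPE)
		return (SET_ERROR(EINVAL));
	return (zvol_replay_vector[txtype](zv, lrc, byteswap));
}
#endif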
47022ac5be4Sperrin 
471681d9761SEric Taylor int
472681d9761SEric Taylor zvol_name2minor(const char *name, minor_t *minor)
473681d9761SEric Taylor {
474681d9761SEric Taylor 	zvol_state_t *zv;
475681d9761SEric Taylor 
476c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
477681d9761SEric Taylor 	zv = zvol_minor_lookup(name);
478681d9761SEric Taylor 	if (minor && zv)
479681d9761SEric Taylor 		*minor = zv->zv_minor;
480c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
481681d9761SEric Taylor 	return (zv ? 0 : -1);
482681d9761SEric Taylor }
483681d9761SEric Taylor 
484e7cbe64fSgw /*
485e7cbe64fSgw  * Create a minor node (plus a whole lot more) for the specified volume.
486fa9e4066Sahrens  */
487fa9e4066Sahrens int
488681d9761SEric Taylor zvol_create_minor(const char *name)
489fa9e4066Sahrens {
490c99e4bdcSChris Kirby 	zfs_soft_state_t *zs;
491fa9e4066Sahrens 	zvol_state_t *zv;
492fa9e4066Sahrens 	objset_t *os;
49367bd71c6Sperrin 	dmu_object_info_t doi;
494fa9e4066Sahrens 	minor_t minor = 0;
495fa9e4066Sahrens 	char chrbuf[30], blkbuf[30];
496fa9e4066Sahrens 	int error;
497fa9e4066Sahrens 
498c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
499fa9e4066Sahrens 
5001195e687SMark J Musante 	if (zvol_minor_lookup(name) != NULL) {
501c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
502be6fd75aSMatthew Ahrens 		return (SET_ERROR(EEXIST));
503fa9e4066Sahrens 	}
504fa9e4066Sahrens 
505503ad85cSMatthew Ahrens 	/* lie and say we're read-only */
506eb633035STom Caputi 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
507fa9e4066Sahrens 
508fa9e4066Sahrens 	if (error) {
509c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
510fa9e4066Sahrens 		return (error);
511fa9e4066Sahrens 	}
512fa9e4066Sahrens 
513c99e4bdcSChris Kirby 	if ((minor = zfsdev_minor_alloc()) == 0) {
514eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
515c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
516be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
517fa9e4066Sahrens 	}
518fa9e4066Sahrens 
519c99e4bdcSChris Kirby 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
520eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
521c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
522be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
523fa9e4066Sahrens 	}
524e9dbad6fSeschrock 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
525e9dbad6fSeschrock 	    (char *)name);
526fa9e4066Sahrens 
527681d9761SEric Taylor 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
528fa9e4066Sahrens 
529fa9e4066Sahrens 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
530fa9e4066Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
531c99e4bdcSChris Kirby 		ddi_soft_state_free(zfsdev_state, minor);
532eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
533c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
534be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
535fa9e4066Sahrens 	}
536fa9e4066Sahrens 
537681d9761SEric Taylor 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
538fa9e4066Sahrens 
539fa9e4066Sahrens 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
540fa9e4066Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
541fa9e4066Sahrens 		ddi_remove_minor_node(zfs_dip, chrbuf);
542c99e4bdcSChris Kirby 		ddi_soft_state_free(zfsdev_state, minor);
543eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
544c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
545be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
546fa9e4066Sahrens 	}
547fa9e4066Sahrens 
548c99e4bdcSChris Kirby 	zs = ddi_get_soft_state(zfsdev_state, minor);
549c99e4bdcSChris Kirby 	zs->zss_type = ZSST_ZVOL;
550c99e4bdcSChris Kirby 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
551681d9761SEric Taylor 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
552fa9e4066Sahrens 	zv->zv_min_bs = DEV_BSHIFT;
553fa9e4066Sahrens 	zv->zv_minor = minor;
554fa9e4066Sahrens 	zv->zv_objset = os;
555f9af39baSGeorge Wilson 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
556681d9761SEric Taylor 		zv->zv_flags |= ZVOL_RDONLY;
55779315247SMatthew Ahrens 	rangelock_init(&zv->zv_rangelock, NULL, NULL);
55888b7b0f2SMatthew Ahrens 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
55988b7b0f2SMatthew Ahrens 	    offsetof(zvol_extent_t, ze_node));
56067bd71c6Sperrin 	/* get and cache the blocksize */
56167bd71c6Sperrin 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
56267bd71c6Sperrin 	ASSERT(error == 0);
56367bd71c6Sperrin 	zv->zv_volblocksize = doi.doi_data_block_size;
56422ac5be4Sperrin 
565f9af39baSGeorge Wilson 	if (spa_writeable(dmu_objset_spa(os))) {
566f9af39baSGeorge Wilson 		if (zil_replay_disable)
567f9af39baSGeorge Wilson 			zil_destroy(dmu_objset_zil(os), B_FALSE);
568f9af39baSGeorge Wilson 		else
569f9af39baSGeorge Wilson 			zil_replay(os, zv, zvol_replay_vector);
570f9af39baSGeorge Wilson 	}
571eb633035STom Caputi 	dmu_objset_disown(os, 1, FTAG);
572681d9761SEric Taylor 	zv->zv_objset = NULL;
573fa9e4066Sahrens 
574fa9e4066Sahrens 	zvol_minors++;
575fa9e4066Sahrens 
576c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
577fa9e4066Sahrens 
578fa9e4066Sahrens 	return (0);
579fa9e4066Sahrens }
580fa9e4066Sahrens 
581fa9e4066Sahrens /*
582fa9e4066Sahrens  * Remove minor node for the specified volume.
583fa9e4066Sahrens  */
584681d9761SEric Taylor static int
585681d9761SEric Taylor zvol_remove_zv(zvol_state_t *zv)
586681d9761SEric Taylor {
587681d9761SEric Taylor 	char nmbuf[20];
588c99e4bdcSChris Kirby 	minor_t minor = zv->zv_minor;
589681d9761SEric Taylor 
590c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
591681d9761SEric Taylor 	if (zv->zv_total_opens != 0)
592be6fd75aSMatthew Ahrens 		return (SET_ERROR(EBUSY));
593681d9761SEric Taylor 
594c99e4bdcSChris Kirby 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
595681d9761SEric Taylor 	ddi_remove_minor_node(zfs_dip, nmbuf);
596681d9761SEric Taylor 
597c99e4bdcSChris Kirby 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
598681d9761SEric Taylor 	ddi_remove_minor_node(zfs_dip, nmbuf);
599681d9761SEric Taylor 
60079315247SMatthew Ahrens 	rangelock_fini(&zv->zv_rangelock);
601681d9761SEric Taylor 
602c99e4bdcSChris Kirby 	kmem_free(zv, sizeof (zvol_state_t));
603c99e4bdcSChris Kirby 
604c99e4bdcSChris Kirby 	ddi_soft_state_free(zfsdev_state, minor);
605681d9761SEric Taylor 
606681d9761SEric Taylor 	zvol_minors--;
607681d9761SEric Taylor 	return (0);
608681d9761SEric Taylor }
609681d9761SEric Taylor 
610fa9e4066Sahrens int
611e9dbad6fSeschrock zvol_remove_minor(const char *name)
612fa9e4066Sahrens {
613fa9e4066Sahrens 	zvol_state_t *zv;
614681d9761SEric Taylor 	int rc;
615fa9e4066Sahrens 
616c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
617e9dbad6fSeschrock 	if ((zv = zvol_minor_lookup(name)) == NULL) {
618c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
619be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
620fa9e4066Sahrens 	}
621681d9761SEric Taylor 	rc = zvol_remove_zv(zv);
622c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
623681d9761SEric Taylor 	return (rc);
624681d9761SEric Taylor }
625fa9e4066Sahrens 
626681d9761SEric Taylor int
6278bf394f1STom Caputi zvol_first_open(zvol_state_t *zv, boolean_t rdonly)
628681d9761SEric Taylor {
629681d9761SEric Taylor 	objset_t *os;
630681d9761SEric Taylor 	uint64_t volsize;
631681d9761SEric Taylor 	int error;
632681d9761SEric Taylor 	uint64_t readonly;
6338bf394f1STom Caputi 	boolean_t ro;
634fa9e4066Sahrens 
6358bf394f1STom Caputi 	ro = (rdonly || (strchr(zv->zv_name, '@') != NULL));
6368bf394f1STom Caputi 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
637681d9761SEric Taylor 	if (error)
638681d9761SEric Taylor 		return (error);
639fa9e4066Sahrens 
640c61ea566SGeorge Wilson 	zv->zv_objset = os;
641681d9761SEric Taylor 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
642681d9761SEric Taylor 	if (error) {
643681d9761SEric Taylor 		ASSERT(error == 0);
644262af05aSJerry Jelinek 		dmu_objset_disown(os, 1, zv);
645681d9761SEric Taylor 		return (error);
646681d9761SEric Taylor 	}
647c61ea566SGeorge Wilson 
6488dfe5547SRichard Yao 	error = dnode_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dn);
64994d1a210STim Haley 	if (error) {
650262af05aSJerry Jelinek 		dmu_objset_disown(os, 1, zv);
65194d1a210STim Haley 		return (error);
65294d1a210STim Haley 	}
653c61ea566SGeorge Wilson 
654c61ea566SGeorge Wilson 	zvol_size_changed(zv, volsize);
655681d9761SEric Taylor 	zv->zv_zilog = zil_open(os, zvol_get_data);
656fa9e4066Sahrens 
657681d9761SEric Taylor 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
658681d9761SEric Taylor 	    NULL) == 0);
659f9af39baSGeorge Wilson 	if (readonly || dmu_objset_is_snapshot(os) ||
660f9af39baSGeorge Wilson 	    !spa_writeable(dmu_objset_spa(os)))
661681d9761SEric Taylor 		zv->zv_flags |= ZVOL_RDONLY;
662681d9761SEric Taylor 	else
663681d9761SEric Taylor 		zv->zv_flags &= ~ZVOL_RDONLY;
664681d9761SEric Taylor 	return (error);
665681d9761SEric Taylor }
666fa9e4066Sahrens 
667681d9761SEric Taylor void
668681d9761SEric Taylor zvol_last_close(zvol_state_t *zv)
669681d9761SEric Taylor {
67022ac5be4Sperrin 	zil_close(zv->zv_zilog);
67122ac5be4Sperrin 	zv->zv_zilog = NULL;
6722e2c1355SMatthew Ahrens 
6738dfe5547SRichard Yao 	dnode_rele(zv->zv_dn, zvol_tag);
6748dfe5547SRichard Yao 	zv->zv_dn = NULL;
6752e2c1355SMatthew Ahrens 
6762e2c1355SMatthew Ahrens 	/*
6772e2c1355SMatthew Ahrens 	 * Evict cached data
6782e2c1355SMatthew Ahrens 	 */
6792e2c1355SMatthew Ahrens 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
6802e2c1355SMatthew Ahrens 	    !(zv->zv_flags & ZVOL_RDONLY))
6812e2c1355SMatthew Ahrens 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
6823b2aab18SMatthew Ahrens 	dmu_objset_evict_dbufs(zv->zv_objset);
6832e2c1355SMatthew Ahrens 
684262af05aSJerry Jelinek 	dmu_objset_disown(zv->zv_objset, 1, zv);
685fa9e4066Sahrens 	zv->zv_objset = NULL;
686fa9e4066Sahrens }
687fa9e4066Sahrens 
688e7cbe64fSgw int
689e7cbe64fSgw zvol_prealloc(zvol_state_t *zv)
690e7cbe64fSgw {
691e7cbe64fSgw 	objset_t *os = zv->zv_objset;
692e7cbe64fSgw 	dmu_tx_t *tx;
693e7cbe64fSgw 	uint64_t refd, avail, usedobjs, availobjs;
694e7cbe64fSgw 	uint64_t resid = zv->zv_volsize;
695e7cbe64fSgw 	uint64_t off = 0;
696e7cbe64fSgw 
697e7cbe64fSgw 	/* Check the space usage before attempting to allocate the space */
698e7cbe64fSgw 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
699e7cbe64fSgw 	if (avail < zv->zv_volsize)
700be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENOSPC));
701e7cbe64fSgw 
702e7cbe64fSgw 	/* Free old extents if they exist */
703e7cbe64fSgw 	zvol_free_extents(zv);
704e7cbe64fSgw 
705e7cbe64fSgw 	while (resid != 0) {
706e7cbe64fSgw 		int error;
707b5152584SMatthew Ahrens 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
708e7cbe64fSgw 
709e7cbe64fSgw 		tx = dmu_tx_create(os);
710e7cbe64fSgw 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
711e7cbe64fSgw 		error = dmu_tx_assign(tx, TXG_WAIT);
712e7cbe64fSgw 		if (error) {
713e7cbe64fSgw 			dmu_tx_abort(tx);
714cdb0ab79Smaybee 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
715e7cbe64fSgw 			return (error);
716e7cbe64fSgw 		}
71782c9918fSTim Haley 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
718e7cbe64fSgw 		dmu_tx_commit(tx);
719e7cbe64fSgw 		off += bytes;
720e7cbe64fSgw 		resid -= bytes;
721e7cbe64fSgw 	}
722e7cbe64fSgw 	txg_wait_synced(dmu_objset_pool(os), 0);
723e7cbe64fSgw 
724e7cbe64fSgw 	return (0);
725e7cbe64fSgw }
726e7cbe64fSgw 
7273b2aab18SMatthew Ahrens static int
728681d9761SEric Taylor zvol_update_volsize(objset_t *os, uint64_t volsize)
729e7cbe64fSgw {
730e7cbe64fSgw 	dmu_tx_t *tx;
731e7cbe64fSgw 	int error;
732eb633035STom Caputi 	uint64_t txg;
733e7cbe64fSgw 
734c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
735e7cbe64fSgw 
736681d9761SEric Taylor 	tx = dmu_tx_create(os);
737e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
7384bb73804SMatthew Ahrens 	dmu_tx_mark_netfree(tx);
739e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
740e7cbe64fSgw 	if (error) {
741e7cbe64fSgw 		dmu_tx_abort(tx);
742e7cbe64fSgw 		return (error);
743e7cbe64fSgw 	}
744eb633035STom Caputi 	txg = dmu_tx_get_txg(tx);
745e7cbe64fSgw 
746681d9761SEric Taylor 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
747e7cbe64fSgw 	    &volsize, tx);
748e7cbe64fSgw 	dmu_tx_commit(tx);
749e7cbe64fSgw 
750eb633035STom Caputi 	txg_wait_synced(dmu_objset_pool(os), txg);
751eb633035STom Caputi 
752e7cbe64fSgw 	if (error == 0)
753681d9761SEric Taylor 		error = dmu_free_long_range(os,
754cdb0ab79Smaybee 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
755681d9761SEric Taylor 	return (error);
756681d9761SEric Taylor }
757e7cbe64fSgw 
758681d9761SEric Taylor void
759681d9761SEric Taylor zvol_remove_minors(const char *name)
760681d9761SEric Taylor {
761681d9761SEric Taylor 	zvol_state_t *zv;
762681d9761SEric Taylor 	char *namebuf;
763681d9761SEric Taylor 	minor_t minor;
764681d9761SEric Taylor 
765681d9761SEric Taylor 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
766681d9761SEric Taylor 	(void) strncpy(namebuf, name, strlen(name));
767681d9761SEric Taylor 	(void) strcat(namebuf, "/");
768c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
769c99e4bdcSChris Kirby 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
770681d9761SEric Taylor 
771c99e4bdcSChris Kirby 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
772681d9761SEric Taylor 		if (zv == NULL)
773681d9761SEric Taylor 			continue;
774681d9761SEric Taylor 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
775681d9761SEric Taylor 			(void) zvol_remove_zv(zv);
776e7cbe64fSgw 	}
777681d9761SEric Taylor 	kmem_free(namebuf, strlen(name) + 2);
778681d9761SEric Taylor 
779c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
780e7cbe64fSgw }
781e7cbe64fSgw 
782c61ea566SGeorge Wilson static int
7833b2aab18SMatthew Ahrens zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
784fa9e4066Sahrens {
785e7cbe64fSgw 	uint64_t old_volsize = 0ULL;
7863b2aab18SMatthew Ahrens 	int error = 0;
787fa9e4066Sahrens 
788c61ea566SGeorge Wilson 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
789c61ea566SGeorge Wilson 
790e7cbe64fSgw 	/*
791e7cbe64fSgw 	 * Reinitialize the dump area to the new size. If we
792681d9761SEric Taylor 	 * fail to resize the dump area, we restore it to
793c61ea566SGeorge Wilson 	 * its original size.  We must set the new volsize prior
794c61ea566SGeorge Wilson 	 * to calling dumpvp_resize() to ensure that the device's
795c61ea566SGeorge Wilson 	 * size(9P) is not visible to the dump subsystem.
796e7cbe64fSgw 	 */
7973b2aab18SMatthew Ahrens 	old_volsize = zv->zv_volsize;
7983b2aab18SMatthew Ahrens 	zvol_size_changed(zv, volsize);
7993b2aab18SMatthew Ahrens 
8003b2aab18SMatthew Ahrens 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
8013b2aab18SMatthew Ahrens 		if ((error = zvol_dumpify(zv)) != 0 ||
8023b2aab18SMatthew Ahrens 		    (error = dumpvp_resize()) != 0) {
8033b2aab18SMatthew Ahrens 			int dumpify_error;
8043b2aab18SMatthew Ahrens 
8053b2aab18SMatthew Ahrens 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
8063b2aab18SMatthew Ahrens 			zvol_size_changed(zv, old_volsize);
8073b2aab18SMatthew Ahrens 			dumpify_error = zvol_dumpify(zv);
8083b2aab18SMatthew Ahrens 			error = dumpify_error ? dumpify_error : error;
809681d9761SEric Taylor 		}
810fa9e4066Sahrens 	}
811fa9e4066Sahrens 
812573ca77eSGeorge Wilson 	/*
813573ca77eSGeorge Wilson 	 * Generate a LUN expansion event.
814573ca77eSGeorge Wilson 	 */
8153b2aab18SMatthew Ahrens 	if (error == 0) {
816573ca77eSGeorge Wilson 		sysevent_id_t eid;
817573ca77eSGeorge Wilson 		nvlist_t *attr;
818573ca77eSGeorge Wilson 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
819573ca77eSGeorge Wilson 
820681d9761SEric Taylor 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
821573ca77eSGeorge Wilson 		    zv->zv_minor);
822573ca77eSGeorge Wilson 
823573ca77eSGeorge Wilson 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
824573ca77eSGeorge Wilson 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
825573ca77eSGeorge Wilson 
826573ca77eSGeorge Wilson 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
827573ca77eSGeorge Wilson 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
828573ca77eSGeorge Wilson 
829573ca77eSGeorge Wilson 		nvlist_free(attr);
830573ca77eSGeorge Wilson 		kmem_free(physpath, MAXPATHLEN);
831573ca77eSGeorge Wilson 	}
832c61ea566SGeorge Wilson 	return (error);
833c61ea566SGeorge Wilson }
834573ca77eSGeorge Wilson 
835c61ea566SGeorge Wilson int
836c61ea566SGeorge Wilson zvol_set_volsize(const char *name, uint64_t volsize)
837c61ea566SGeorge Wilson {
838c61ea566SGeorge Wilson 	zvol_state_t *zv = NULL;
839c61ea566SGeorge Wilson 	objset_t *os;
840c61ea566SGeorge Wilson 	int error;
841c61ea566SGeorge Wilson 	dmu_object_info_t doi;
842c61ea566SGeorge Wilson 	uint64_t readonly;
8433b2aab18SMatthew Ahrens 	boolean_t owned = B_FALSE;
8443b2aab18SMatthew Ahrens 
8453b2aab18SMatthew Ahrens 	error = dsl_prop_get_integer(name,
8463b2aab18SMatthew Ahrens 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
8473b2aab18SMatthew Ahrens 	if (error != 0)
8483b2aab18SMatthew Ahrens 		return (error);
8493b2aab18SMatthew Ahrens 	if (readonly)
850be6fd75aSMatthew Ahrens 		return (SET_ERROR(EROFS));
851c61ea566SGeorge Wilson 
852c61ea566SGeorge Wilson 	mutex_enter(&zfsdev_state_lock);
853c61ea566SGeorge Wilson 	zv = zvol_minor_lookup(name);
8543b2aab18SMatthew Ahrens 
8553b2aab18SMatthew Ahrens 	if (zv == NULL || zv->zv_objset == NULL) {
856eb633035STom Caputi 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
8573b2aab18SMatthew Ahrens 		    FTAG, &os)) != 0) {
8583b2aab18SMatthew Ahrens 			mutex_exit(&zfsdev_state_lock);
8593b2aab18SMatthew Ahrens 			return (error);
8603b2aab18SMatthew Ahrens 		}
8613b2aab18SMatthew Ahrens 		owned = B_TRUE;
8623b2aab18SMatthew Ahrens 		if (zv != NULL)
8633b2aab18SMatthew Ahrens 			zv->zv_objset = os;
8643b2aab18SMatthew Ahrens 	} else {
8653b2aab18SMatthew Ahrens 		os = zv->zv_objset;
866c61ea566SGeorge Wilson 	}
867c61ea566SGeorge Wilson 
868c61ea566SGeorge Wilson 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
8693b2aab18SMatthew Ahrens 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
870c61ea566SGeorge Wilson 		goto out;
871c61ea566SGeorge Wilson 
8723b2aab18SMatthew Ahrens 	error = zvol_update_volsize(os, volsize);
873c61ea566SGeorge Wilson 
8743b2aab18SMatthew Ahrens 	if (error == 0 && zv != NULL)
8753b2aab18SMatthew Ahrens 		error = zvol_update_live_volsize(zv, volsize);
876bb0ade09Sahrens out:
8773b2aab18SMatthew Ahrens 	if (owned) {
878eb633035STom Caputi 		dmu_objset_disown(os, B_TRUE, FTAG);
8793b2aab18SMatthew Ahrens 		if (zv != NULL)
8803b2aab18SMatthew Ahrens 			zv->zv_objset = NULL;
8813b2aab18SMatthew Ahrens 	}
882c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
883fa9e4066Sahrens 	return (error);
884fa9e4066Sahrens }
885fa9e4066Sahrens 
886fa9e4066Sahrens /*ARGSUSED*/
887fa9e4066Sahrens int
888fa9e4066Sahrens zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
889fa9e4066Sahrens {
890fa9e4066Sahrens 	zvol_state_t *zv;
891681d9761SEric Taylor 	int err = 0;
892fa9e4066Sahrens 
893c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
894fa9e4066Sahrens 
895c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
896fa9e4066Sahrens 	if (zv == NULL) {
897c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
898be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
899fa9e4066Sahrens 	}
900fa9e4066Sahrens 
901681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
9028bf394f1STom Caputi 		err = zvol_first_open(zv, !(flag & FWRITE));
903681d9761SEric Taylor 	if (err) {
904c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
905681d9761SEric Taylor 		return (err);
906681d9761SEric Taylor 	}
9078bf394f1STom Caputi 
9088bf394f1STom Caputi 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
909be6fd75aSMatthew Ahrens 		err = SET_ERROR(EROFS);
910681d9761SEric Taylor 		goto out;
911fa9e4066Sahrens 	}
912c7f714e2SEric Taylor 	if (zv->zv_flags & ZVOL_EXCL) {
913be6fd75aSMatthew Ahrens 		err = SET_ERROR(EBUSY);
914681d9761SEric Taylor 		goto out;
915c7f714e2SEric Taylor 	}
916c7f714e2SEric Taylor 	if (flag & FEXCL) {
917c7f714e2SEric Taylor 		if (zv->zv_total_opens != 0) {
918be6fd75aSMatthew Ahrens 			err = SET_ERROR(EBUSY);
919681d9761SEric Taylor 			goto out;
920c7f714e2SEric Taylor 		}
921c7f714e2SEric Taylor 		zv->zv_flags |= ZVOL_EXCL;
922c7f714e2SEric Taylor 	}
923fa9e4066Sahrens 
924fa9e4066Sahrens 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
925fa9e4066Sahrens 		zv->zv_open_count[otyp]++;
926fa9e4066Sahrens 		zv->zv_total_opens++;
927fa9e4066Sahrens 	}
928c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
929fa9e4066Sahrens 
930681d9761SEric Taylor 	return (err);
931681d9761SEric Taylor out:
932681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
933681d9761SEric Taylor 		zvol_last_close(zv);
934c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
935681d9761SEric Taylor 	return (err);
936fa9e4066Sahrens }
937fa9e4066Sahrens 
938fa9e4066Sahrens /*ARGSUSED*/
939fa9e4066Sahrens int
940fa9e4066Sahrens zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
941fa9e4066Sahrens {
942fa9e4066Sahrens 	minor_t minor = getminor(dev);
943fa9e4066Sahrens 	zvol_state_t *zv;
944681d9761SEric Taylor 	int error = 0;
945fa9e4066Sahrens 
946c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
947fa9e4066Sahrens 
948c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
949fa9e4066Sahrens 	if (zv == NULL) {
950c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
951be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
952fa9e4066Sahrens 	}
953fa9e4066Sahrens 
954c7f714e2SEric Taylor 	if (zv->zv_flags & ZVOL_EXCL) {
955c7f714e2SEric Taylor 		ASSERT(zv->zv_total_opens == 1);
956c7f714e2SEric Taylor 		zv->zv_flags &= ~ZVOL_EXCL;
957fa9e4066Sahrens 	}
958fa9e4066Sahrens 
959fa9e4066Sahrens 	/*
960fa9e4066Sahrens 	 * If the open count is zero, this is a spurious close.
961fa9e4066Sahrens 	 * That indicates a bug in the kernel / DDI framework.
962fa9e4066Sahrens 	 */
963fa9e4066Sahrens 	ASSERT(zv->zv_open_count[otyp] != 0);
964fa9e4066Sahrens 	ASSERT(zv->zv_total_opens != 0);
965fa9e4066Sahrens 
966fa9e4066Sahrens 	/*
967fa9e4066Sahrens 	 * You may get multiple opens, but only one close.
968fa9e4066Sahrens 	 */
969fa9e4066Sahrens 	zv->zv_open_count[otyp]--;
970fa9e4066Sahrens 	zv->zv_total_opens--;
971fa9e4066Sahrens 
972681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
973681d9761SEric Taylor 		zvol_last_close(zv);
974fa9e4066Sahrens 
975c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
976681d9761SEric Taylor 	return (error);
977fa9e4066Sahrens }
978fa9e4066Sahrens 
979cab3a55eSPrakash Surya /* ARGSUSED */
980feb08c6bSbillm static void
981b24ab676SJeff Bonwick zvol_get_done(zgd_t *zgd, int error)
98267bd71c6Sperrin {
983b24ab676SJeff Bonwick 	if (zgd->zgd_db)
984b24ab676SJeff Bonwick 		dmu_buf_rele(zgd->zgd_db, zgd);
985b24ab676SJeff Bonwick 
98679315247SMatthew Ahrens 	rangelock_exit(zgd->zgd_lr);
987b24ab676SJeff Bonwick 
98867bd71c6Sperrin 	kmem_free(zgd, sizeof (zgd_t));
98967bd71c6Sperrin }
99067bd71c6Sperrin 
99167bd71c6Sperrin /*
99267bd71c6Sperrin  * Get data to generate a TX_WRITE intent log record.
99367bd71c6Sperrin  */
994feb08c6bSbillm static int
9951271e4b1SPrakash Surya zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
99667bd71c6Sperrin {
99767bd71c6Sperrin 	zvol_state_t *zv = arg;
998b24ab676SJeff Bonwick 	uint64_t offset = lr->lr_offset;
999b24ab676SJeff Bonwick 	uint64_t size = lr->lr_length;	/* length of user data */
100067bd71c6Sperrin 	dmu_buf_t *db;
100167bd71c6Sperrin 	zgd_t *zgd;
100267bd71c6Sperrin 	int error;
100367bd71c6Sperrin 
10041271e4b1SPrakash Surya 	ASSERT3P(lwb, !=, NULL);
10051271e4b1SPrakash Surya 	ASSERT3P(zio, !=, NULL);
10061271e4b1SPrakash Surya 	ASSERT3U(size, !=, 0);
1007b24ab676SJeff Bonwick 
1008b24ab676SJeff Bonwick 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
10091271e4b1SPrakash Surya 	zgd->zgd_lwb = lwb;
1010feb08c6bSbillm 
1011c2e6a7d6Sperrin 	/*
1012c2e6a7d6Sperrin 	 * Write records come in two flavors: immediate and indirect.
1013c2e6a7d6Sperrin 	 * For small writes it's cheaper to store the data with the
1014c2e6a7d6Sperrin 	 * log record (immediate); for large writes it's cheaper to
1015c2e6a7d6Sperrin 	 * sync the data and get a pointer to it (indirect) so that
1016c2e6a7d6Sperrin 	 * we don't have to write the data twice.
1017c2e6a7d6Sperrin 	 */
101842b14111SLOLi 	if (buf != NULL) { /* immediate write */
101979315247SMatthew Ahrens 		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
102042b14111SLOLi 		    RL_READER);
10218dfe5547SRichard Yao 		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
1022b24ab676SJeff Bonwick 		    DMU_READ_NO_PREFETCH);
102342b14111SLOLi 	} else { /* indirect write */
102442b14111SLOLi 		/*
102542b14111SLOLi 		 * We have to lock the whole block to ensure that no one can
102642b14111SLOLi 		 * change the data while it is being written out and its checksum
102742b14111SLOLi 		 * is being calculated. Unlike zfs_get_data, we need not re-check
102842b14111SLOLi 		 * the blocksize after we get the lock because it cannot be changed.
102942b14111SLOLi 		 */
1030b24ab676SJeff Bonwick 		size = zv->zv_volblocksize;
1031b24ab676SJeff Bonwick 		offset = P2ALIGN(offset, size);
103279315247SMatthew Ahrens 		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
103342b14111SLOLi 		    RL_READER);
10348dfe5547SRichard Yao 		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
103547cb52daSJeff Bonwick 		    DMU_READ_NO_PREFETCH);
1036b24ab676SJeff Bonwick 		if (error == 0) {
1037b7edcb94SMatthew Ahrens 			blkptr_t *bp = &lr->lr_blkptr;
103880901aeaSGeorge Wilson 
1039b24ab676SJeff Bonwick 			zgd->zgd_db = db;
1040b24ab676SJeff Bonwick 			zgd->zgd_bp = bp;
104167bd71c6Sperrin 
1042b24ab676SJeff Bonwick 			ASSERT(db->db_offset == offset);
1043b24ab676SJeff Bonwick 			ASSERT(db->db_size == size);
104467bd71c6Sperrin 
1045b24ab676SJeff Bonwick 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1046b24ab676SJeff Bonwick 			    zvol_get_done, zgd);
1047975c32a0SNeil Perrin 
1048b24ab676SJeff Bonwick 			if (error == 0)
1049b24ab676SJeff Bonwick 				return (0);
1050b24ab676SJeff Bonwick 		}
1051975c32a0SNeil Perrin 	}
1052975c32a0SNeil Perrin 
1053b24ab676SJeff Bonwick 	zvol_get_done(zgd, error);
1054b24ab676SJeff Bonwick 
105567bd71c6Sperrin 	return (error);
105667bd71c6Sperrin }
105767bd71c6Sperrin 
1058a24e15ceSperrin /*
1059a24e15ceSperrin  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
106022ac5be4Sperrin  *
106122ac5be4Sperrin  * We store data in the log buffers if it's small enough.
106267bd71c6Sperrin  * Otherwise we will later flush the data out via dmu_sync().
106322ac5be4Sperrin  */
106467bd71c6Sperrin ssize_t zvol_immediate_write_sz = 32768;
106522ac5be4Sperrin 
1066feb08c6bSbillm static void
1067510b6c0eSNeil Perrin zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1068510b6c0eSNeil Perrin     boolean_t sync)
106922ac5be4Sperrin {
1070feb08c6bSbillm 	uint32_t blocksize = zv->zv_volblocksize;
10711209a471SNeil Perrin 	zilog_t *zilog = zv->zv_zilog;
1072c5ee4681SAlexander Motin 	itx_wr_state_t write_state;
1073510b6c0eSNeil Perrin 
1074b24ab676SJeff Bonwick 	if (zil_replaying(zilog, tx))
10751209a471SNeil Perrin 		return;
10761209a471SNeil Perrin 
1077c5ee4681SAlexander Motin 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1078c5ee4681SAlexander Motin 		write_state = WR_INDIRECT;
1079c5ee4681SAlexander Motin 	else if (!spa_has_slogs(zilog->zl_spa) &&
1080c5ee4681SAlexander Motin 	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1081c5ee4681SAlexander Motin 		write_state = WR_INDIRECT;
1082c5ee4681SAlexander Motin 	else if (sync)
1083c5ee4681SAlexander Motin 		write_state = WR_COPIED;
1084c5ee4681SAlexander Motin 	else
1085c5ee4681SAlexander Motin 		write_state = WR_NEED_COPY;
1086feb08c6bSbillm 
1087510b6c0eSNeil Perrin 	while (resid) {
1088510b6c0eSNeil Perrin 		itx_t *itx;
1089510b6c0eSNeil Perrin 		lr_write_t *lr;
1090c5ee4681SAlexander Motin 		itx_wr_state_t wr_state = write_state;
1091c5ee4681SAlexander Motin 		ssize_t len = resid;
1092c5ee4681SAlexander Motin 
1093c5ee4681SAlexander Motin 		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1094c5ee4681SAlexander Motin 			wr_state = WR_NEED_COPY;
1095c5ee4681SAlexander Motin 		else if (wr_state == WR_INDIRECT)
1096c5ee4681SAlexander Motin 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1097510b6c0eSNeil Perrin 
1098510b6c0eSNeil Perrin 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1099c5ee4681SAlexander Motin 		    (wr_state == WR_COPIED ? len : 0));
1100feb08c6bSbillm 		lr = (lr_write_t *)&itx->itx_lr;
11018dfe5547SRichard Yao 		if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
11028dfe5547SRichard Yao 		    off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1103b24ab676SJeff Bonwick 			zil_itx_destroy(itx);
1104510b6c0eSNeil Perrin 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1105510b6c0eSNeil Perrin 			lr = (lr_write_t *)&itx->itx_lr;
1106c5ee4681SAlexander Motin 			wr_state = WR_NEED_COPY;
1107510b6c0eSNeil Perrin 		}
1108510b6c0eSNeil Perrin 
1109c5ee4681SAlexander Motin 		itx->itx_wr_state = wr_state;
1110feb08c6bSbillm 		lr->lr_foid = ZVOL_OBJ;
1111feb08c6bSbillm 		lr->lr_offset = off;
1112510b6c0eSNeil Perrin 		lr->lr_length = len;
1113b24ab676SJeff Bonwick 		lr->lr_blkoff = 0;
1114feb08c6bSbillm 		BP_ZERO(&lr->lr_blkptr);
1115feb08c6bSbillm 
1116510b6c0eSNeil Perrin 		itx->itx_private = zv;
1117510b6c0eSNeil Perrin 		itx->itx_sync = sync;
1118510b6c0eSNeil Perrin 
11195002558fSNeil Perrin 		zil_itx_assign(zilog, itx, tx);
1120510b6c0eSNeil Perrin 
1121510b6c0eSNeil Perrin 		off += len;
1122510b6c0eSNeil Perrin 		resid -= len;
112322ac5be4Sperrin 	}
112422ac5be4Sperrin }
112522ac5be4Sperrin 
112688b7b0f2SMatthew Ahrens static int
1127810e43b2SBill Pijewski zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1128810e43b2SBill Pijewski     uint64_t size, boolean_t doread, boolean_t isdump)
1129e7cbe64fSgw {
1130dc0bb255SEric Taylor 	if (doread && !vdev_readable(vd))
1131be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1132ac04831dSMike Gerdts 	if (!doread && !vdev_writeable(vd))
1133ac04831dSMike Gerdts 		return (SET_ERROR(EIO));
1134ac04831dSMike Gerdts 	if (vd->vdev_ops->vdev_op_dumpio == NULL)
1135be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1136e7cbe64fSgw 
1137ac04831dSMike Gerdts 	return (vd->vdev_ops->vdev_op_dumpio(vd, addr, size,
1138ac04831dSMike Gerdts 	    offset, origoffset, doread, isdump));
1139e7cbe64fSgw }
1140e7cbe64fSgw 
114188b7b0f2SMatthew Ahrens static int
114288b7b0f2SMatthew Ahrens zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
114388b7b0f2SMatthew Ahrens     boolean_t doread, boolean_t isdump)
1144e7cbe64fSgw {
1145e7cbe64fSgw 	vdev_t *vd;
1146e7cbe64fSgw 	int error;
114788b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
1148e7cbe64fSgw 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1149e7cbe64fSgw 
115088b7b0f2SMatthew Ahrens 	/* Must be sector aligned, and not straddle a block boundary. */
115188b7b0f2SMatthew Ahrens 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
115288b7b0f2SMatthew Ahrens 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1153be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
115488b7b0f2SMatthew Ahrens 	}
115588b7b0f2SMatthew Ahrens 	ASSERT(size <= zv->zv_volblocksize);
1156e7cbe64fSgw 
115788b7b0f2SMatthew Ahrens 	/* Locate the extent this belongs to */
115888b7b0f2SMatthew Ahrens 	ze = list_head(&zv->zv_extents);
115988b7b0f2SMatthew Ahrens 	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
116088b7b0f2SMatthew Ahrens 		offset -= ze->ze_nblks * zv->zv_volblocksize;
116188b7b0f2SMatthew Ahrens 		ze = list_next(&zv->zv_extents, ze);
116288b7b0f2SMatthew Ahrens 	}
116324cc0e1cSGeorge Wilson 
11643b2aab18SMatthew Ahrens 	if (ze == NULL)
1165be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
11663b2aab18SMatthew Ahrens 
116724cc0e1cSGeorge Wilson 	if (!ddi_in_panic())
116824cc0e1cSGeorge Wilson 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
116924cc0e1cSGeorge Wilson 
117088b7b0f2SMatthew Ahrens 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
117188b7b0f2SMatthew Ahrens 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1172810e43b2SBill Pijewski 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1173810e43b2SBill Pijewski 	    size, doread, isdump);
117424cc0e1cSGeorge Wilson 
117524cc0e1cSGeorge Wilson 	if (!ddi_in_panic())
117624cc0e1cSGeorge Wilson 		spa_config_exit(spa, SCL_STATE, FTAG);
117724cc0e1cSGeorge Wilson 
1178e7cbe64fSgw 	return (error);
1179e7cbe64fSgw }
1180e7cbe64fSgw 
1181fa9e4066Sahrens int
1182fa9e4066Sahrens zvol_strategy(buf_t *bp)
1183fa9e4066Sahrens {
1184c99e4bdcSChris Kirby 	zfs_soft_state_t *zs = NULL;
1185c99e4bdcSChris Kirby 	zvol_state_t *zv;
1186fa9e4066Sahrens 	uint64_t off, volsize;
118788b7b0f2SMatthew Ahrens 	size_t resid;
1188fa9e4066Sahrens 	char *addr;
118922ac5be4Sperrin 	objset_t *os;
1190fa9e4066Sahrens 	int error = 0;
1191ac04831dSMike Gerdts 	boolean_t doread = !!(bp->b_flags & B_READ);
1192810e43b2SBill Pijewski 	boolean_t is_dumpified;
1193510b6c0eSNeil Perrin 	boolean_t sync;
1194fa9e4066Sahrens 
1195c99e4bdcSChris Kirby 	if (getminor(bp->b_edev) == 0) {
1196be6fd75aSMatthew Ahrens 		error = SET_ERROR(EINVAL);
1197c99e4bdcSChris Kirby 	} else {
1198c99e4bdcSChris Kirby 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1199c99e4bdcSChris Kirby 		if (zs == NULL)
1200be6fd75aSMatthew Ahrens 			error = SET_ERROR(ENXIO);
1201c99e4bdcSChris Kirby 		else if (zs->zss_type != ZSST_ZVOL)
1202be6fd75aSMatthew Ahrens 			error = SET_ERROR(EINVAL);
1203fa9e4066Sahrens 	}
1204fa9e4066Sahrens 
1205c99e4bdcSChris Kirby 	if (error) {
1206c99e4bdcSChris Kirby 		bioerror(bp, error);
1207fa9e4066Sahrens 		biodone(bp);
1208fa9e4066Sahrens 		return (0);
1209fa9e4066Sahrens 	}
1210fa9e4066Sahrens 
1211c99e4bdcSChris Kirby 	zv = zs->zss_data;
1212c99e4bdcSChris Kirby 
1213681d9761SEric Taylor 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1214fa9e4066Sahrens 		bioerror(bp, EROFS);
1215fa9e4066Sahrens 		biodone(bp);
1216fa9e4066Sahrens 		return (0);
1217fa9e4066Sahrens 	}
1218fa9e4066Sahrens 
1219fa9e4066Sahrens 	off = ldbtob(bp->b_blkno);
1220fa9e4066Sahrens 	volsize = zv->zv_volsize;
1221fa9e4066Sahrens 
122222ac5be4Sperrin 	os = zv->zv_objset;
122322ac5be4Sperrin 	ASSERT(os != NULL);
1224fa9e4066Sahrens 
1225fa9e4066Sahrens 	bp_mapin(bp);
1226fa9e4066Sahrens 	addr = bp->b_un.b_addr;
1227fa9e4066Sahrens 	resid = bp->b_bcount;
1228fa9e4066Sahrens 
122988b7b0f2SMatthew Ahrens 	if (resid > 0 && (off < 0 || off >= volsize)) {
123088b7b0f2SMatthew Ahrens 		bioerror(bp, EIO);
123188b7b0f2SMatthew Ahrens 		biodone(bp);
123288b7b0f2SMatthew Ahrens 		return (0);
123388b7b0f2SMatthew Ahrens 	}
123473ec3d9cSgw 
1235810e43b2SBill Pijewski 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
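	/*
	 * A write requires an explicit zil_commit() below when the caller
	 * did not mark the buf B_ASYNC and the write cache is disabled, or
	 * when the dataset has sync=always.  Reads and dumpified I/O never
	 * go through the ZIL.
	 */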
123655da60b9SMark J Musante 	sync = ((!(bp->b_flags & B_ASYNC) &&
123755da60b9SMark J Musante 	    !(zv->zv_flags & ZVOL_WCE)) ||
123855da60b9SMark J Musante 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1239810e43b2SBill Pijewski 	    !doread && !is_dumpified;
1240510b6c0eSNeil Perrin 
1241c3377ee9SJohn Levon 	smt_begin_unsafe();
1242455e370cSJohn Levon 
1243a24e15ceSperrin 	/*
1244a24e15ceSperrin 	 * There must be no buffer changes when doing a dmu_sync() because
1245a24e15ceSperrin 	 * we can't change the data whilst calculating the checksum.
1246a24e15ceSperrin 	 */
124779315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock, off, resid,
124888b7b0f2SMatthew Ahrens 	    doread ? RL_READER : RL_WRITER);
1249fa9e4066Sahrens 
1250e7cbe64fSgw 	while (resid != 0 && off < volsize) {
125188b7b0f2SMatthew Ahrens 		size_t size = MIN(resid, zvol_maxphys);
1252810e43b2SBill Pijewski 		if (is_dumpified) {
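			/*
			 * zvol_dumpio() cannot handle I/O that straddles a
			 * volblocksize boundary, so clamp the chunk to the
			 * end of the current block.
			 */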
1253e7cbe64fSgw 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
125488b7b0f2SMatthew Ahrens 			error = zvol_dumpio(zv, addr, off, size,
125588b7b0f2SMatthew Ahrens 			    doread, B_FALSE);
125688b7b0f2SMatthew Ahrens 		} else if (doread) {
12577bfdf011SNeil Perrin 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
12587bfdf011SNeil Perrin 			    DMU_READ_PREFETCH);
1259fa9e4066Sahrens 		} else {
126022ac5be4Sperrin 			dmu_tx_t *tx = dmu_tx_create(os);
1261fa9e4066Sahrens 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1262fa9e4066Sahrens 			error = dmu_tx_assign(tx, TXG_WAIT);
1263fa9e4066Sahrens 			if (error) {
1264fa9e4066Sahrens 				dmu_tx_abort(tx);
1265fa9e4066Sahrens 			} else {
126622ac5be4Sperrin 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1267510b6c0eSNeil Perrin 				zvol_log_write(zv, tx, off, size, sync);
1268fa9e4066Sahrens 				dmu_tx_commit(tx);
1269fa9e4066Sahrens 			}
1270fa9e4066Sahrens 		}
1271b87f3af3Sperrin 		if (error) {
1272b87f3af3Sperrin 			/* convert checksum errors into IO errors */
1273b87f3af3Sperrin 			if (error == ECKSUM)
1274be6fd75aSMatthew Ahrens 				error = SET_ERROR(EIO);
1275fa9e4066Sahrens 			break;
1276b87f3af3Sperrin 		}
1277fa9e4066Sahrens 		off += size;
1278fa9e4066Sahrens 		addr += size;
1279fa9e4066Sahrens 		resid -= size;
1280fa9e4066Sahrens 	}
128179315247SMatthew Ahrens 	rangelock_exit(lr);
1282fa9e4066Sahrens 
1283fa9e4066Sahrens 	if ((bp->b_resid = resid) == bp->b_bcount)
1284fa9e4066Sahrens 		bioerror(bp, off > volsize ? EINVAL : error);
1285fa9e4066Sahrens 
1286510b6c0eSNeil Perrin 	if (sync)
12875002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1288feb08c6bSbillm 	biodone(bp);
128922ac5be4Sperrin 
1290c3377ee9SJohn Levon 	smt_end_unsafe();
1291455e370cSJohn Levon 
1292fa9e4066Sahrens 	return (0);
1293fa9e4066Sahrens }
1294fa9e4066Sahrens 
129567bd71c6Sperrin /*
129667bd71c6Sperrin  * Set the buffer count to the zvol maximum transfer.
129767bd71c6Sperrin  * Using our own routine instead of the default minphys()
129867bd71c6Sperrin  * means that for larger writes we write bigger buffers on X86
129967bd71c6Sperrin  * (128K instead of 56K) and flush the disk write cache less often
130067bd71c6Sperrin  * (every zvol_maxphys, currently 1MB, instead of every minphys, currently
130167bd71c6Sperrin  * 56K on X86 and 128K on sparc).
130267bd71c6Sperrin  */
130367bd71c6Sperrin void
130467bd71c6Sperrin zvol_minphys(struct buf *bp)
130567bd71c6Sperrin {
130667bd71c6Sperrin 	if (bp->b_bcount > zvol_maxphys)
130767bd71c6Sperrin 		bp->b_bcount = zvol_maxphys;
130867bd71c6Sperrin }
130967bd71c6Sperrin 
1310e7cbe64fSgw int
1311e7cbe64fSgw zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1312e7cbe64fSgw {
1313e7cbe64fSgw 	minor_t minor = getminor(dev);
1314e7cbe64fSgw 	zvol_state_t *zv;
1315e7cbe64fSgw 	int error = 0;
1316e7cbe64fSgw 	uint64_t size;
1317e7cbe64fSgw 	uint64_t boff;
1318e7cbe64fSgw 	uint64_t resid;
1319e7cbe64fSgw 
1320c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1321e7cbe64fSgw 	if (zv == NULL)
1322be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1323e7cbe64fSgw 
13243b2aab18SMatthew Ahrens 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1325be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
13263b2aab18SMatthew Ahrens 
1327e7cbe64fSgw 	boff = ldbtob(blkno);
1328e7cbe64fSgw 	resid = ldbtob(nblocks);
132988b7b0f2SMatthew Ahrens 
133088b7b0f2SMatthew Ahrens 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
133188b7b0f2SMatthew Ahrens 
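	/*
	 * This path can run in panic context.  Issue the dump one
	 * volblocksize-bounded chunk at a time through zvol_dumpio(),
	 * which writes directly to the preallocated dump blocks.
	 */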
1332e7cbe64fSgw 	while (resid) {
1333e7cbe64fSgw 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
133488b7b0f2SMatthew Ahrens 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1335e7cbe64fSgw 		if (error)
1336e7cbe64fSgw 			break;
1337e7cbe64fSgw 		boff += size;
1338e7cbe64fSgw 		addr += size;
1339e7cbe64fSgw 		resid -= size;
1340e7cbe64fSgw 	}
1341e7cbe64fSgw 
1342e7cbe64fSgw 	return (error);
1343e7cbe64fSgw }
1344e7cbe64fSgw 
1345fa9e4066Sahrens /*ARGSUSED*/
1346fa9e4066Sahrens int
1347feb08c6bSbillm zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1348fa9e4066Sahrens {
1349c7ca1008Sgw 	minor_t minor = getminor(dev);
1350c7ca1008Sgw 	zvol_state_t *zv;
135173ec3d9cSgw 	uint64_t volsize;
1352feb08c6bSbillm 	int error = 0;
1353fa9e4066Sahrens 
1354c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1355c7ca1008Sgw 	if (zv == NULL)
1356be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1357c7ca1008Sgw 
135873ec3d9cSgw 	volsize = zv->zv_volsize;
135973ec3d9cSgw 	if (uio->uio_resid > 0 &&
136073ec3d9cSgw 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1361be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
136273ec3d9cSgw 
136388b7b0f2SMatthew Ahrens 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
136488b7b0f2SMatthew Ahrens 		error = physio(zvol_strategy, NULL, dev, B_READ,
136588b7b0f2SMatthew Ahrens 		    zvol_minphys, uio);
136688b7b0f2SMatthew Ahrens 		return (error);
136788b7b0f2SMatthew Ahrens 	}
136888b7b0f2SMatthew Ahrens 
1369c3377ee9SJohn Levon 	smt_begin_unsafe();
1370455e370cSJohn Levon 
137179315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
137279315247SMatthew Ahrens 	    uio->uio_loffset, uio->uio_resid, RL_READER);
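	/*
	 * Transfer in chunks of at most DMU_MAX_ACCESS / 2 so that each
	 * dmu_read_uio() stays comfortably within the DMU's per-access
	 * limit.
	 */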
137373ec3d9cSgw 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1374feb08c6bSbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1375fa9e4066Sahrens 
137673ec3d9cSgw 		/* don't read past the end */
137773ec3d9cSgw 		if (bytes > volsize - uio->uio_loffset)
137873ec3d9cSgw 			bytes = volsize - uio->uio_loffset;
137973ec3d9cSgw 
1380feb08c6bSbillm 		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1381b87f3af3Sperrin 		if (error) {
1382b87f3af3Sperrin 			/* convert checksum errors into IO errors */
1383b87f3af3Sperrin 			if (error == ECKSUM)
1384be6fd75aSMatthew Ahrens 				error = SET_ERROR(EIO);
1385feb08c6bSbillm 			break;
1386b87f3af3Sperrin 		}
1387feb08c6bSbillm 	}
138879315247SMatthew Ahrens 	rangelock_exit(lr);
138979315247SMatthew Ahrens 
1390c3377ee9SJohn Levon 	smt_end_unsafe();
1391455e370cSJohn Levon 
1392feb08c6bSbillm 	return (error);
1393fa9e4066Sahrens }
1394fa9e4066Sahrens 
1395fa9e4066Sahrens /*ARGSUSED*/
1396fa9e4066Sahrens int
1397feb08c6bSbillm zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1398fa9e4066Sahrens {
1399c7ca1008Sgw 	minor_t minor = getminor(dev);
1400c7ca1008Sgw 	zvol_state_t *zv;
140173ec3d9cSgw 	uint64_t volsize;
1402feb08c6bSbillm 	int error = 0;
1403510b6c0eSNeil Perrin 	boolean_t sync;
1404feb08c6bSbillm 
1405c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1406c7ca1008Sgw 	if (zv == NULL)
1407be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1408c7ca1008Sgw 
140973ec3d9cSgw 	volsize = zv->zv_volsize;
141073ec3d9cSgw 	if (uio->uio_resid > 0 &&
141173ec3d9cSgw 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1412be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
141373ec3d9cSgw 
1414e7cbe64fSgw 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1415e7cbe64fSgw 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1416e7cbe64fSgw 		    zvol_minphys, uio);
1417e7cbe64fSgw 		return (error);
1418e7cbe64fSgw 	}
1419e7cbe64fSgw 
1420c3377ee9SJohn Levon 	smt_begin_unsafe();
1421455e370cSJohn Levon 
142255da60b9SMark J Musante 	sync = !(zv->zv_flags & ZVOL_WCE) ||
142355da60b9SMark J Musante 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1424510b6c0eSNeil Perrin 
142579315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
142679315247SMatthew Ahrens 	    uio->uio_loffset, uio->uio_resid, RL_WRITER);
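	/*
	 * Write in chunks of at most DMU_MAX_ACCESS / 2, each under its own
	 * transaction.  Every chunk is logged to the ZIL so that the
	 * zil_commit() below can make it stable when a synchronous write is
	 * required.
	 */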
142773ec3d9cSgw 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1428feb08c6bSbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1429feb08c6bSbillm 		uint64_t off = uio->uio_loffset;
1430feb08c6bSbillm 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
143173ec3d9cSgw 
143273ec3d9cSgw 		if (bytes > volsize - off)	/* don't write past the end */
143373ec3d9cSgw 			bytes = volsize - off;
143473ec3d9cSgw 
1435*25df42a1SMatthew Ahrens 		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
1436feb08c6bSbillm 		error = dmu_tx_assign(tx, TXG_WAIT);
1437feb08c6bSbillm 		if (error) {
1438feb08c6bSbillm 			dmu_tx_abort(tx);
1439feb08c6bSbillm 			break;
1440feb08c6bSbillm 		}
14418dfe5547SRichard Yao 		error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx);
1442feb08c6bSbillm 		if (error == 0)
1443510b6c0eSNeil Perrin 			zvol_log_write(zv, tx, off, bytes, sync);
1444feb08c6bSbillm 		dmu_tx_commit(tx);
1445feb08c6bSbillm 
1446feb08c6bSbillm 		if (error)
1447feb08c6bSbillm 			break;
1448feb08c6bSbillm 	}
144979315247SMatthew Ahrens 	rangelock_exit(lr);
145079315247SMatthew Ahrens 
1451510b6c0eSNeil Perrin 	if (sync)
14525002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1453455e370cSJohn Levon 
1454c3377ee9SJohn Levon 	smt_end_unsafe();
1455455e370cSJohn Levon 
1456feb08c6bSbillm 	return (error);
1457fa9e4066Sahrens }
1458fa9e4066Sahrens 
1459c7f714e2SEric Taylor int
1460c7f714e2SEric Taylor zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1461c7f714e2SEric Taylor {
1462c7f714e2SEric Taylor 	struct uuid uuid = EFI_RESERVED;
1463c7f714e2SEric Taylor 	efi_gpe_t gpe = { 0 };
1464c7f714e2SEric Taylor 	uint32_t crc;
1465c7f714e2SEric Taylor 	dk_efi_t efi;
1466c7f714e2SEric Taylor 	int length;
1467c7f714e2SEric Taylor 	char *ptr;
1468c7f714e2SEric Taylor 
1469c7f714e2SEric Taylor 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1470be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFAULT));
1471c7f714e2SEric Taylor 	ptr = (char *)(uintptr_t)efi.dki_data_64;
1472c7f714e2SEric Taylor 	length = efi.dki_length;
1473c7f714e2SEric Taylor 	/*
1474c7f714e2SEric Taylor 	 * Some clients may attempt to request a PMBR for the
1475c7f714e2SEric Taylor 	 * zvol.  Currently this interface will return EINVAL to
1476c7f714e2SEric Taylor 	 * such requests.  These requests could be supported by
1477c7f714e2SEric Taylor 	 * adding a check for lba == 0 and consing up an appropriate
1478c7f714e2SEric Taylor 	 * PMBR.
1479c7f714e2SEric Taylor 	 */
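	/*
	 * Illustrative sketch only, not implemented: such support would
	 * roughly mean handling lba == 0 ahead of the range check below,
	 * e.g.
	 *
	 *	if (efi.dki_lba == 0) {
	 *		build a protective MBR containing one 0xEE
	 *		(protective-MBR type) partition covering the whole
	 *		device, then ddi_copyout() it to the caller;
	 *	}
	 */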
1480c7f714e2SEric Taylor 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1481be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
1482c7f714e2SEric Taylor 
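	/*
	 * Present the volume as a single EFI_RESERVED partition spanning
	 * LBA 34 (the first usable LBA behind the GPT header and its
	 * one-entry partition array) through the last volume block.
	 */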
1483c7f714e2SEric Taylor 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1484c7f714e2SEric Taylor 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1485c7f714e2SEric Taylor 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1486c7f714e2SEric Taylor 
1487c7f714e2SEric Taylor 	if (efi.dki_lba == 1) {
1488c7f714e2SEric Taylor 		efi_gpt_t gpt = { 0 };
1489c7f714e2SEric Taylor 
1490c7f714e2SEric Taylor 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1491c7f714e2SEric Taylor 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1492fd797736SJohn Levon 		gpt.efi_gpt_HeaderSize = LE_32(EFI_HEADER_SIZE);
1493c7f714e2SEric Taylor 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1494c7f714e2SEric Taylor 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1495c7f714e2SEric Taylor 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1496c7f714e2SEric Taylor 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1497c7f714e2SEric Taylor 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1498c7f714e2SEric Taylor 		gpt.efi_gpt_SizeOfPartitionEntry =
1499c7f714e2SEric Taylor 		    LE_32(sizeof (efi_gpe_t));
1500c7f714e2SEric Taylor 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1501c7f714e2SEric Taylor 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1502fd797736SJohn Levon 		CRC32(crc, &gpt, EFI_HEADER_SIZE, -1U, crc32_table);
1503c7f714e2SEric Taylor 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1504c7f714e2SEric Taylor 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1505c7f714e2SEric Taylor 		    flag))
1506be6fd75aSMatthew Ahrens 			return (SET_ERROR(EFAULT));
1507c7f714e2SEric Taylor 		ptr += sizeof (gpt);
1508c7f714e2SEric Taylor 		length -= sizeof (gpt);
1509c7f714e2SEric Taylor 	}
1510c7f714e2SEric Taylor 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1511c7f714e2SEric Taylor 	    length), flag))
1512be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFAULT));
1513c7f714e2SEric Taylor 	return (0);
1514c7f714e2SEric Taylor }
1515c7f714e2SEric Taylor 
15163fb517f7SJames Moore /*
15173fb517f7SJames Moore  * BEGIN entry points to allow external callers access to the volume.
15183fb517f7SJames Moore  */
15193fb517f7SJames Moore /*
15203fb517f7SJames Moore  * Return the volume parameters needed for access from an external caller.
15213fb517f7SJames Moore  * These values are invariant as long as the volume is held open.
15223fb517f7SJames Moore  */
15233fb517f7SJames Moore int
15243fb517f7SJames Moore zvol_get_volume_params(minor_t minor, uint64_t *blksize,
15253fb517f7SJames Moore     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
15268dfe5547SRichard Yao     void **rl_hdl, void **dnode_hdl)
15273fb517f7SJames Moore {
15283fb517f7SJames Moore 	zvol_state_t *zv;
15293fb517f7SJames Moore 
1530c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1531c99e4bdcSChris Kirby 	if (zv == NULL)
1532be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
15333fb517f7SJames Moore 	if (zv->zv_flags & ZVOL_DUMPIFIED)
1534be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
15353fb517f7SJames Moore 
15363fb517f7SJames Moore 	ASSERT(blksize && max_xfer_len && minor_hdl &&
15378dfe5547SRichard Yao 	    objset_hdl && zil_hdl && rl_hdl && dnode_hdl);
15383fb517f7SJames Moore 
15393fb517f7SJames Moore 	*blksize = zv->zv_volblocksize;
15403fb517f7SJames Moore 	*max_xfer_len = (uint64_t)zvol_maxphys;
15413fb517f7SJames Moore 	*minor_hdl = zv;
15423fb517f7SJames Moore 	*objset_hdl = zv->zv_objset;
15433fb517f7SJames Moore 	*zil_hdl = zv->zv_zilog;
154479315247SMatthew Ahrens 	*rl_hdl = &zv->zv_rangelock;
15458dfe5547SRichard Yao 	*dnode_hdl = zv->zv_dn;
15463fb517f7SJames Moore 	return (0);
15473fb517f7SJames Moore }
15483fb517f7SJames Moore 
15493fb517f7SJames Moore /*
15503fb517f7SJames Moore  * Return the current volume size to an external caller.
15513fb517f7SJames Moore  * The size can change while the volume is open.
15523fb517f7SJames Moore  */
15533fb517f7SJames Moore uint64_t
15543fb517f7SJames Moore zvol_get_volume_size(void *minor_hdl)
15553fb517f7SJames Moore {
15563fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
15573fb517f7SJames Moore 
15583fb517f7SJames Moore 	return (zv->zv_volsize);
15593fb517f7SJames Moore }
15603fb517f7SJames Moore 
15613fb517f7SJames Moore /*
15623fb517f7SJames Moore  * Return the current WCE setting to an external caller.
15633fb517f7SJames Moore  * The WCE setting can change while the volume is open.
15643fb517f7SJames Moore  */
15653fb517f7SJames Moore int
15663fb517f7SJames Moore zvol_get_volume_wce(void *minor_hdl)
15673fb517f7SJames Moore {
15683fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
15693fb517f7SJames Moore 
15703fb517f7SJames Moore 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
15713fb517f7SJames Moore }
15723fb517f7SJames Moore 
15733fb517f7SJames Moore /*
15743fb517f7SJames Moore  * Entry point for external callers to zvol_log_write
15753fb517f7SJames Moore  */
15763fb517f7SJames Moore void
15773fb517f7SJames Moore zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
15783fb517f7SJames Moore     boolean_t sync)
15793fb517f7SJames Moore {
15803fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
15813fb517f7SJames Moore 
15823fb517f7SJames Moore 	zvol_log_write(zv, tx, off, resid, sync);
15833fb517f7SJames Moore }
15843fb517f7SJames Moore /*
15853fb517f7SJames Moore  * END entry points to allow external callers access to the volume.
15863fb517f7SJames Moore  */
15873fb517f7SJames Moore 
1588b77b9231SDan McDonald /*
1589b77b9231SDan McDonald  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1590b77b9231SDan McDonald  */
1591b77b9231SDan McDonald static void
1592b77b9231SDan McDonald zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1593b77b9231SDan McDonald     boolean_t sync)
1594b77b9231SDan McDonald {
1595b77b9231SDan McDonald 	itx_t *itx;
1596b77b9231SDan McDonald 	lr_truncate_t *lr;
1597b77b9231SDan McDonald 	zilog_t *zilog = zv->zv_zilog;
1598b77b9231SDan McDonald 
1599b77b9231SDan McDonald 	if (zil_replaying(zilog, tx))
1600b77b9231SDan McDonald 		return;
1601b77b9231SDan McDonald 
1602b77b9231SDan McDonald 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1603b77b9231SDan McDonald 	lr = (lr_truncate_t *)&itx->itx_lr;
1604b77b9231SDan McDonald 	lr->lr_foid = ZVOL_OBJ;
1605b77b9231SDan McDonald 	lr->lr_offset = off;
1606b77b9231SDan McDonald 	lr->lr_length = len;
1607b77b9231SDan McDonald 
1608b77b9231SDan McDonald 	itx->itx_sync = sync;
1609b77b9231SDan McDonald 	zil_itx_assign(zilog, itx, tx);
1610b77b9231SDan McDonald }
1611b77b9231SDan McDonald 
1612fa9e4066Sahrens /*
1613fa9e4066Sahrens  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1614b77b9231SDan McDonald  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1615fa9e4066Sahrens  */
1616fa9e4066Sahrens /*ARGSUSED*/
1617fa9e4066Sahrens int
1618fa9e4066Sahrens zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1619fa9e4066Sahrens {
1620fa9e4066Sahrens 	zvol_state_t *zv;
1621af2c4821Smaybee 	struct dk_callback *dkc;
1622efe44a03SJerry Jelinek 	int i, error = 0;
162379315247SMatthew Ahrens 	locked_range_t *lr;
1624fa9e4066Sahrens 
1625c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
1626fa9e4066Sahrens 
1627c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1628fa9e4066Sahrens 
1629fa9e4066Sahrens 	if (zv == NULL) {
1630c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1631be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1632fa9e4066Sahrens 	}
1633701f66c4SEric Taylor 	ASSERT(zv->zv_total_opens > 0);
1634fa9e4066Sahrens 
1635fa9e4066Sahrens 	switch (cmd) {
1636fa9e4066Sahrens 
1637fa9e4066Sahrens 	case DKIOCINFO:
1638a0b60564SGeorge Wilson 	{
1639a0b60564SGeorge Wilson 		struct dk_cinfo dki;
1640a0b60564SGeorge Wilson 
1641af2c4821Smaybee 		bzero(&dki, sizeof (dki));
1642af2c4821Smaybee 		(void) strcpy(dki.dki_cname, "zvol");
1643af2c4821Smaybee 		(void) strcpy(dki.dki_dname, "zvol");
1644af2c4821Smaybee 		dki.dki_ctype = DKC_UNKNOWN;
16453adc9019SEric Taylor 		dki.dki_unit = getminor(dev);
1646b5152584SMatthew Ahrens 		dki.dki_maxtransfer =
1647b5152584SMatthew Ahrens 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1648c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1649af2c4821Smaybee 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1650be6fd75aSMatthew Ahrens 			error = SET_ERROR(EFAULT);
1651fa9e4066Sahrens 		return (error);
1652a0b60564SGeorge Wilson 	}
1653fa9e4066Sahrens 
1654fa9e4066Sahrens 	case DKIOCGMEDIAINFO:
1655a0b60564SGeorge Wilson 	{
1656a0b60564SGeorge Wilson 		struct dk_minfo dkm;
1657a0b60564SGeorge Wilson 
1658fa9e4066Sahrens 		bzero(&dkm, sizeof (dkm));
1659fa9e4066Sahrens 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1660fa9e4066Sahrens 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1661fa9e4066Sahrens 		dkm.dki_media_type = DK_UNKNOWN;
1662c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1663fa9e4066Sahrens 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1664be6fd75aSMatthew Ahrens 			error = SET_ERROR(EFAULT);
1665fa9e4066Sahrens 		return (error);
1666a0b60564SGeorge Wilson 	}
1667a0b60564SGeorge Wilson 
1668a0b60564SGeorge Wilson 	case DKIOCGMEDIAINFOEXT:
1669a0b60564SGeorge Wilson 	{
1670a0b60564SGeorge Wilson 		struct dk_minfo_ext dkmext;
1671a0b60564SGeorge Wilson 
1672a0b60564SGeorge Wilson 		bzero(&dkmext, sizeof (dkmext));
1673a0b60564SGeorge Wilson 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1674a0b60564SGeorge Wilson 		dkmext.dki_pbsize = zv->zv_volblocksize;
1675a0b60564SGeorge Wilson 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1676a0b60564SGeorge Wilson 		dkmext.dki_media_type = DK_UNKNOWN;
1677a0b60564SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1678a0b60564SGeorge Wilson 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1679a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1680a0b60564SGeorge Wilson 		return (error);
1681a0b60564SGeorge Wilson 	}
1682fa9e4066Sahrens 
1683fa9e4066Sahrens 	case DKIOCGETEFI:
1684a0b60564SGeorge Wilson 	{
1685a0b60564SGeorge Wilson 		uint64_t vs = zv->zv_volsize;
1686a0b60564SGeorge Wilson 		uint8_t bs = zv->zv_min_bs;
1687fa9e4066Sahrens 
1688a0b60564SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1689a0b60564SGeorge Wilson 		error = zvol_getefi((void *)arg, flag, vs, bs);
1690a0b60564SGeorge Wilson 		return (error);
1691a0b60564SGeorge Wilson 	}
1692fa9e4066Sahrens 
1693feb08c6bSbillm 	case DKIOCFLUSHWRITECACHE:
1694af2c4821Smaybee 		dkc = (struct dk_callback *)arg;
1695c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1696455e370cSJohn Levon 
1697c3377ee9SJohn Levon 		smt_begin_unsafe();
1698455e370cSJohn Levon 
16995002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1700af2c4821Smaybee 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1701af2c4821Smaybee 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1702af2c4821Smaybee 			error = 0;
1703af2c4821Smaybee 		}
1704455e370cSJohn Levon 
1705c3377ee9SJohn Levon 		smt_end_unsafe();
1706455e370cSJohn Levon 
1707701f66c4SEric Taylor 		return (error);
1708701f66c4SEric Taylor 
1709701f66c4SEric Taylor 	case DKIOCGETWCE:
1710a0b60564SGeorge Wilson 	{
1711a0b60564SGeorge Wilson 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1712a0b60564SGeorge Wilson 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1713a0b60564SGeorge Wilson 		    flag))
1714a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1715a0b60564SGeorge Wilson 		break;
1716a0b60564SGeorge Wilson 	}
1717a0b60564SGeorge Wilson 	case DKIOCSETWCE:
1718a0b60564SGeorge Wilson 	{
1719a0b60564SGeorge Wilson 		int wce;
1720a0b60564SGeorge Wilson 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1721a0b60564SGeorge Wilson 		    flag)) {
1722a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1723701f66c4SEric Taylor 			break;
1724701f66c4SEric Taylor 		}
1725a0b60564SGeorge Wilson 		if (wce) {
1726a0b60564SGeorge Wilson 			zv->zv_flags |= ZVOL_WCE;
1727a0b60564SGeorge Wilson 			mutex_exit(&zfsdev_state_lock);
1728a0b60564SGeorge Wilson 		} else {
1729a0b60564SGeorge Wilson 			zv->zv_flags &= ~ZVOL_WCE;
1730a0b60564SGeorge Wilson 			mutex_exit(&zfsdev_state_lock);
1731c3377ee9SJohn Levon 			smt_begin_unsafe();
1732a0b60564SGeorge Wilson 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1733c3377ee9SJohn Levon 			smt_end_unsafe();
1734701f66c4SEric Taylor 		}
1735a0b60564SGeorge Wilson 		return (0);
1736a0b60564SGeorge Wilson 	}
1737feb08c6bSbillm 
1738b6130eadSmaybee 	case DKIOCGGEOM:
1739b6130eadSmaybee 	case DKIOCGVTOC:
1740e7cbe64fSgw 		/*
1741e7cbe64fSgw 		 * commands using these (like prtvtoc) expect ENOTSUP
1742e7cbe64fSgw 		 * since we're emulating an EFI label
1743e7cbe64fSgw 		 */
1744be6fd75aSMatthew Ahrens 		error = SET_ERROR(ENOTSUP);
1745b6130eadSmaybee 		break;
1746b6130eadSmaybee 
1747e7cbe64fSgw 	case DKIOCDUMPINIT:
174879315247SMatthew Ahrens 		lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1749e7cbe64fSgw 		    RL_WRITER);
1750e7cbe64fSgw 		error = zvol_dumpify(zv);
175179315247SMatthew Ahrens 		rangelock_exit(lr);
1752e7cbe64fSgw 		break;
1753e7cbe64fSgw 
1754e7cbe64fSgw 	case DKIOCDUMPFINI:
175506d5ae10SEric Taylor 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
175606d5ae10SEric Taylor 			break;
175779315247SMatthew Ahrens 		lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1758e7cbe64fSgw 		    RL_WRITER);
1759e7cbe64fSgw 		error = zvol_dump_fini(zv);
176079315247SMatthew Ahrens 		rangelock_exit(lr);
1761e7cbe64fSgw 		break;
1762e7cbe64fSgw 
1763b77b9231SDan McDonald 	case DKIOCFREE:
1764b77b9231SDan McDonald 	{
1765047c81d3SSaso Kiselkov 		dkioc_free_list_t *dfl;
1766b77b9231SDan McDonald 		dmu_tx_t *tx;
1767b77b9231SDan McDonald 
1768893c83baSGeorge Wilson 		if (!zvol_unmap_enabled)
1769893c83baSGeorge Wilson 			break;
1770893c83baSGeorge Wilson 
1771047c81d3SSaso Kiselkov 		if (!(flag & FKIOCTL)) {
1772047c81d3SSaso Kiselkov 			error = dfl_copyin((void *)arg, &dfl, flag, KM_SLEEP);
1773047c81d3SSaso Kiselkov 			if (error != 0)
1774047c81d3SSaso Kiselkov 				break;
1775047c81d3SSaso Kiselkov 		} else {
1776047c81d3SSaso Kiselkov 			dfl = (dkioc_free_list_t *)arg;
1777047c81d3SSaso Kiselkov 			ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS);
1778047c81d3SSaso Kiselkov 			if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) {
1779047c81d3SSaso Kiselkov 				error = SET_ERROR(EINVAL);
1780047c81d3SSaso Kiselkov 				break;
1781047c81d3SSaso Kiselkov 			}
1782b77b9231SDan McDonald 		}
1783b77b9231SDan McDonald 
1784574e2414SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1785b77b9231SDan McDonald 
1786c3377ee9SJohn Levon 		smt_begin_unsafe();
1787455e370cSJohn Levon 
1788047c81d3SSaso Kiselkov 		for (int i = 0; i < dfl->dfl_num_exts; i++) {
1789047c81d3SSaso Kiselkov 			uint64_t start = dfl->dfl_exts[i].dfle_start,
1790047c81d3SSaso Kiselkov 			    length = dfl->dfl_exts[i].dfle_length,
1791047c81d3SSaso Kiselkov 			    end = start + length;
1792047c81d3SSaso Kiselkov 
1793047c81d3SSaso Kiselkov 			/*
1794047c81d3SSaso Kiselkov 			 * Apply Postel's Law to length-checking.  If they
1795047c81d3SSaso Kiselkov 			 * overshoot, just blank out until the end, if there's
1796047c81d3SSaso Kiselkov 			 * a need to blank out anything.
1797047c81d3SSaso Kiselkov 			 */
1798047c81d3SSaso Kiselkov 			if (start >= zv->zv_volsize)
1799047c81d3SSaso Kiselkov 				continue;	/* No need to do anything... */
1800047c81d3SSaso Kiselkov 			if (end > zv->zv_volsize) {
1801047c81d3SSaso Kiselkov 				end = DMU_OBJECT_END;
1802047c81d3SSaso Kiselkov 				length = end - start;
1803047c81d3SSaso Kiselkov 			}
1804b77b9231SDan McDonald 
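			/*
			 * The TX_TRUNCATE record is logged (and its tx
			 * committed) before the blocks are freed, so a crash
			 * mid-free can be repaired by ZIL replay redoing the
			 * free.
			 */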
180579315247SMatthew Ahrens 			lr = rangelock_enter(&zv->zv_rangelock, start, length,
1806047c81d3SSaso Kiselkov 			    RL_WRITER);
1807047c81d3SSaso Kiselkov 			tx = dmu_tx_create(zv->zv_objset);
1808047c81d3SSaso Kiselkov 			error = dmu_tx_assign(tx, TXG_WAIT);
1809047c81d3SSaso Kiselkov 			if (error != 0) {
1810047c81d3SSaso Kiselkov 				dmu_tx_abort(tx);
1811047c81d3SSaso Kiselkov 			} else {
1812047c81d3SSaso Kiselkov 				zvol_log_truncate(zv, tx, start, length,
1813047c81d3SSaso Kiselkov 				    B_TRUE);
1814047c81d3SSaso Kiselkov 				dmu_tx_commit(tx);
1815047c81d3SSaso Kiselkov 				error = dmu_free_long_range(zv->zv_objset,
1816047c81d3SSaso Kiselkov 				    ZVOL_OBJ, start, length);
1817047c81d3SSaso Kiselkov 			}
1818047c81d3SSaso Kiselkov 
181979315247SMatthew Ahrens 			rangelock_exit(lr);
1820047c81d3SSaso Kiselkov 
1821047c81d3SSaso Kiselkov 			if (error != 0)
1822047c81d3SSaso Kiselkov 				break;
1823047c81d3SSaso Kiselkov 		}
1824b77b9231SDan McDonald 
18251c9272b8SStephen Blinick 		/*
18261c9272b8SStephen Blinick 		 * If the write-cache is disabled, 'sync' property
18271c9272b8SStephen Blinick 		 * is set to 'always', or if the caller is asking for
18281c9272b8SStephen Blinick 		 * a synchronous free, commit this operation to the zil.
18291c9272b8SStephen Blinick 		 * This will sync any previous uncommitted writes to the
18301c9272b8SStephen Blinick 		 * zvol object.
18311c9272b8SStephen Blinick 		 * Can be overridden by the zvol_unmap_sync_enabled tunable.
18321c9272b8SStephen Blinick 		 */
18331c9272b8SStephen Blinick 		if ((error == 0) && zvol_unmap_sync_enabled &&
18341c9272b8SStephen Blinick 		    (!(zv->zv_flags & ZVOL_WCE) ||
18351c9272b8SStephen Blinick 		    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
1836047c81d3SSaso Kiselkov 		    (dfl->dfl_flags & DF_WAIT_SYNC))) {
18371c9272b8SStephen Blinick 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1838b77b9231SDan McDonald 		}
18391c9272b8SStephen Blinick 
1840047c81d3SSaso Kiselkov 		if (!(flag & FKIOCTL))
1841047c81d3SSaso Kiselkov 			dfl_free(dfl);
1842047c81d3SSaso Kiselkov 
1843c3377ee9SJohn Levon 		smt_end_unsafe();
1844455e370cSJohn Levon 
1845574e2414SGeorge Wilson 		return (error);
1846b77b9231SDan McDonald 	}
1847b77b9231SDan McDonald 
1848efe44a03SJerry Jelinek 	case DKIOC_CANFREE:
1849efe44a03SJerry Jelinek 		i = zvol_unmap_enabled ? 1 : 0;
1850efe44a03SJerry Jelinek 		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
1851efe44a03SJerry Jelinek 			error = SET_ERROR(EFAULT);
1852efe44a03SJerry Jelinek 		} else {
1853efe44a03SJerry Jelinek 			error = 0;
1854efe44a03SJerry Jelinek 		}
1855efe44a03SJerry Jelinek 		break;
1856efe44a03SJerry Jelinek 
1857fa9e4066Sahrens 	default:
1858be6fd75aSMatthew Ahrens 		error = SET_ERROR(ENOTTY);
1859fa9e4066Sahrens 		break;
1860fa9e4066Sahrens 
1861fa9e4066Sahrens 	}
1862c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
1863fa9e4066Sahrens 	return (error);
1864fa9e4066Sahrens }
1865fa9e4066Sahrens 
1866fa9e4066Sahrens int
1867fa9e4066Sahrens zvol_busy(void)
1868fa9e4066Sahrens {
1869fa9e4066Sahrens 	return (zvol_minors != 0);
1870fa9e4066Sahrens }
1871fa9e4066Sahrens 
1872fa9e4066Sahrens void
1873fa9e4066Sahrens zvol_init(void)
1874fa9e4066Sahrens {
1875c99e4bdcSChris Kirby 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1876c99e4bdcSChris Kirby 	    1) == 0);
1877c99e4bdcSChris Kirby 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1878fa9e4066Sahrens }
1879fa9e4066Sahrens 
1880fa9e4066Sahrens void
1881fa9e4066Sahrens zvol_fini(void)
1882fa9e4066Sahrens {
1883c99e4bdcSChris Kirby 	mutex_destroy(&zfsdev_state_lock);
1884c99e4bdcSChris Kirby 	ddi_soft_state_fini(&zfsdev_state);
1885fa9e4066Sahrens }
1886e7cbe64fSgw 
1887810e43b2SBill Pijewski /*ARGSUSED*/
1888810e43b2SBill Pijewski static int
1889810e43b2SBill Pijewski zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1890810e43b2SBill Pijewski {
1891810e43b2SBill Pijewski 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1892810e43b2SBill Pijewski 
18932acef22dSMatthew Ahrens 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1894810e43b2SBill Pijewski 		return (1);
1895810e43b2SBill Pijewski 	return (0);
1896810e43b2SBill Pijewski }
1897810e43b2SBill Pijewski 
1898810e43b2SBill Pijewski /*ARGSUSED*/
1899810e43b2SBill Pijewski static void
1900810e43b2SBill Pijewski zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1901810e43b2SBill Pijewski {
1902810e43b2SBill Pijewski 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1903810e43b2SBill Pijewski 
19042acef22dSMatthew Ahrens 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1905810e43b2SBill Pijewski }
1906810e43b2SBill Pijewski 
1907e7cbe64fSgw static int
1908e7cbe64fSgw zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1909e7cbe64fSgw {
1910e7cbe64fSgw 	dmu_tx_t *tx;
1911810e43b2SBill Pijewski 	int error;
1912e7cbe64fSgw 	objset_t *os = zv->zv_objset;
1913810e43b2SBill Pijewski 	spa_t *spa = dmu_objset_spa(os);
1914810e43b2SBill Pijewski 	vdev_t *vd = spa->spa_root_vdev;
1915e7cbe64fSgw 	nvlist_t *nv = NULL;
1916810e43b2SBill Pijewski 	uint64_t version = spa_version(spa);
1917b10bba72SGeorge Wilson 	uint64_t checksum, compress, refresrv, vbs, dedup;
1918e7cbe64fSgw 
1919c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1920810e43b2SBill Pijewski 	ASSERT(vd->vdev_ops == &vdev_root_ops);
1921810e43b2SBill Pijewski 
1922681d9761SEric Taylor 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1923681d9761SEric Taylor 	    DMU_OBJECT_END);
1924b10bba72SGeorge Wilson 	if (error != 0)
1925b10bba72SGeorge Wilson 		return (error);
1926681d9761SEric Taylor 	/* wait for dmu_free_long_range to actually free the blocks */
1927681d9761SEric Taylor 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1928e7cbe64fSgw 
1929810e43b2SBill Pijewski 	/*
1930810e43b2SBill Pijewski 	 * If the pool on which the dump device is being initialized has more
1931810e43b2SBill Pijewski 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1932810e43b2SBill Pijewski 	 * enabled.  If so, bump that feature's counter to indicate that the
1933810e43b2SBill Pijewski 	 * feature is active. We also check the vdev type to handle the
1934810e43b2SBill Pijewski 	 * following case:
1935810e43b2SBill Pijewski 	 *   # zpool create test raidz disk1 disk2 disk3
1936810e43b2SBill Pijewski 	 *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
1937810e43b2SBill Pijewski 	 *   the raidz vdev itself has 3 children.
1938810e43b2SBill Pijewski 	 */
1939810e43b2SBill Pijewski 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1940810e43b2SBill Pijewski 		if (!spa_feature_is_enabled(spa,
19412acef22dSMatthew Ahrens 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1942810e43b2SBill Pijewski 			return (SET_ERROR(ENOTSUP));
1943810e43b2SBill Pijewski 		(void) dsl_sync_task(spa_name(spa),
1944810e43b2SBill Pijewski 		    zfs_mvdev_dump_feature_check,
19457d46dc6cSMatthew Ahrens 		    zfs_mvdev_dump_activate_feature_sync, NULL,
19467d46dc6cSMatthew Ahrens 		    2, ZFS_SPACE_CHECK_RESERVED);
1947810e43b2SBill Pijewski 	}
1948810e43b2SBill Pijewski 
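	/*
	 * Snapshot the volume's current checksum, compression,
	 * refreservation and volblocksize settings (plus dedup on pools
	 * that support it) so that zvol_dump_fini() can restore them.
	 */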
1949b10bba72SGeorge Wilson 	if (!resize) {
1950b10bba72SGeorge Wilson 		error = dsl_prop_get_integer(zv->zv_name,
1951b10bba72SGeorge Wilson 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1952b10bba72SGeorge Wilson 		if (error == 0) {
1953b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1954b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1955b10bba72SGeorge Wilson 			    NULL);
1956b10bba72SGeorge Wilson 		}
1957b10bba72SGeorge Wilson 		if (error == 0) {
1958b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1959b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
1960b10bba72SGeorge Wilson 			    &refresrv, NULL);
1961b10bba72SGeorge Wilson 		}
1962b10bba72SGeorge Wilson 		if (error == 0) {
1963b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1964b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
1965b10bba72SGeorge Wilson 			    NULL);
1966b10bba72SGeorge Wilson 		}
1967b10bba72SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP && error == 0) {
1968b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1969b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1970b10bba72SGeorge Wilson 		}
1971b10bba72SGeorge Wilson 	}
1972b10bba72SGeorge Wilson 	if (error != 0)
1973b10bba72SGeorge Wilson 		return (error);
1974b10bba72SGeorge Wilson 
1975e7cbe64fSgw 	tx = dmu_tx_create(os);
1976e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1977681d9761SEric Taylor 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1978e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
1979b10bba72SGeorge Wilson 	if (error != 0) {
1980e7cbe64fSgw 		dmu_tx_abort(tx);
1981e7cbe64fSgw 		return (error);
1982e7cbe64fSgw 	}
1983e7cbe64fSgw 
1984e7cbe64fSgw 	/*
1985e7cbe64fSgw 	 * If we are resizing the dump device then we only need to
1986e7cbe64fSgw 	 * update the refreservation to match the newly updated
1987e7cbe64fSgw 	 * zvol size. Otherwise, we save off the zvol's original properties
1988e7cbe64fSgw 	 * so that we can restore them if the zvol is ever undumpified.
1989e7cbe64fSgw 	 */
1990e7cbe64fSgw 	if (resize) {
1991e7cbe64fSgw 		error = zap_update(os, ZVOL_ZAP_OBJ,
1992e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1993e7cbe64fSgw 		    &zv->zv_volsize, tx);
1994e7cbe64fSgw 	} else {
1995b10bba72SGeorge Wilson 		error = zap_update(os, ZVOL_ZAP_OBJ,
1996e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1997e7cbe64fSgw 		    &compress, tx);
1998b10bba72SGeorge Wilson 		if (error == 0) {
1999b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2000b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2001b10bba72SGeorge Wilson 			    &checksum, tx);
2002b10bba72SGeorge Wilson 		}
2003b10bba72SGeorge Wilson 		if (error == 0) {
2004b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2005b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2006b10bba72SGeorge Wilson 			    &refresrv, tx);
2007b10bba72SGeorge Wilson 		}
2008b10bba72SGeorge Wilson 		if (error == 0) {
2009b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2010b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2011b10bba72SGeorge Wilson 			    &vbs, tx);
2012b10bba72SGeorge Wilson 		}
2013b10bba72SGeorge Wilson 		if (error == 0) {
2014b10bba72SGeorge Wilson 			error = dmu_object_set_blocksize(
2015b10bba72SGeorge Wilson 			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2016b10bba72SGeorge Wilson 		}
2017b10bba72SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP && error == 0) {
2018b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
20198d265e66SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
20208d265e66SGeorge Wilson 			    &dedup, tx);
20218d265e66SGeorge Wilson 		}
2022681d9761SEric Taylor 		if (error == 0)
2023b5152584SMatthew Ahrens 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2024e7cbe64fSgw 	}
2025e7cbe64fSgw 	dmu_tx_commit(tx);
2026e7cbe64fSgw 
2027e7cbe64fSgw 	/*
2028e7cbe64fSgw 	 * We only need to update the zvol's properties if we are initializing
2029e7cbe64fSgw 	 * the dump area for the first time.
2030e7cbe64fSgw 	 */
2031b10bba72SGeorge Wilson 	if (error == 0 && !resize) {
2032b10bba72SGeorge Wilson 		/*
2033b10bba72SGeorge Wilson 		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2034b10bba72SGeorge Wilson 		 * function.  Otherwise, use the old default -- OFF.
2035b10bba72SGeorge Wilson 		 */
2036b10bba72SGeorge Wilson 		checksum = spa_feature_is_active(spa,
2037b10bba72SGeorge Wilson 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2038b10bba72SGeorge Wilson 		    ZIO_CHECKSUM_OFF;
2039b10bba72SGeorge Wilson 
2040e7cbe64fSgw 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2041e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2042e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2043e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2044e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2045e7cbe64fSgw 		    ZIO_COMPRESS_OFF) == 0);
2046e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2047e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2048810e43b2SBill Pijewski 		    checksum) == 0);
20498d265e66SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP) {
20508d265e66SGeorge Wilson 			VERIFY(nvlist_add_uint64(nv,
20518d265e66SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
20528d265e66SGeorge Wilson 			    ZIO_CHECKSUM_OFF) == 0);
20538d265e66SGeorge Wilson 		}
2054e7cbe64fSgw 
205592241e0bSTom Erickson 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
205692241e0bSTom Erickson 		    nv, NULL);
2057e7cbe64fSgw 		nvlist_free(nv);
2058e7cbe64fSgw 	}
2059e7cbe64fSgw 
2060e7cbe64fSgw 	/* Allocate the space for the dump */
2061b10bba72SGeorge Wilson 	if (error == 0)
2062b10bba72SGeorge Wilson 		error = zvol_prealloc(zv);
2063e7cbe64fSgw 	return (error);
2064e7cbe64fSgw }
2065e7cbe64fSgw 
2066e7cbe64fSgw static int
2067e7cbe64fSgw zvol_dumpify(zvol_state_t *zv)
2068e7cbe64fSgw {
2069e7cbe64fSgw 	int error = 0;
2070e7cbe64fSgw 	uint64_t dumpsize = 0;
2071e7cbe64fSgw 	dmu_tx_t *tx;
2072e7cbe64fSgw 	objset_t *os = zv->zv_objset;
2073e7cbe64fSgw 
2074681d9761SEric Taylor 	if (zv->zv_flags & ZVOL_RDONLY)
2075be6fd75aSMatthew Ahrens 		return (SET_ERROR(EROFS));
2076e7cbe64fSgw 
2077eb633035STom Caputi 	if (os->os_encrypted)
2078eb633035STom Caputi 		return (SET_ERROR(ENOTSUP));
2079eb633035STom Caputi 
2080e7cbe64fSgw 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2081e7cbe64fSgw 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
20824445fffbSMatthew Ahrens 		boolean_t resize = (dumpsize > 0);
2083e7cbe64fSgw 
2084e7cbe64fSgw 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2085e7cbe64fSgw 			(void) zvol_dump_fini(zv);
2086e7cbe64fSgw 			return (error);
2087e7cbe64fSgw 		}
2088e7cbe64fSgw 	}
2089e7cbe64fSgw 
2090e7cbe64fSgw 	/*
2091e7cbe64fSgw 	 * Build up our lba mapping.
2092e7cbe64fSgw 	 */
2093e7cbe64fSgw 	error = zvol_get_lbas(zv);
2094e7cbe64fSgw 	if (error) {
2095e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2096e7cbe64fSgw 		return (error);
2097e7cbe64fSgw 	}
2098e7cbe64fSgw 
2099e7cbe64fSgw 	tx = dmu_tx_create(os);
2100e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2101e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2102e7cbe64fSgw 	if (error) {
2103e7cbe64fSgw 		dmu_tx_abort(tx);
2104e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2105e7cbe64fSgw 		return (error);
2106e7cbe64fSgw 	}
2107e7cbe64fSgw 
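	/*
	 * Persist the dumpified size in the ZAP; the comparison against
	 * zv_volsize above is what detects a later resize.
	 */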
2108e7cbe64fSgw 	zv->zv_flags |= ZVOL_DUMPIFIED;
2109e7cbe64fSgw 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2110e7cbe64fSgw 	    &zv->zv_volsize, tx);
2111e7cbe64fSgw 	dmu_tx_commit(tx);
2112e7cbe64fSgw 
2113e7cbe64fSgw 	if (error) {
2114e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2115e7cbe64fSgw 		return (error);
2116e7cbe64fSgw 	}
2117e7cbe64fSgw 
2118e7cbe64fSgw 	txg_wait_synced(dmu_objset_pool(os), 0);
2119e7cbe64fSgw 	return (0);
2120e7cbe64fSgw }
2121e7cbe64fSgw 
2122e7cbe64fSgw static int
2123e7cbe64fSgw zvol_dump_fini(zvol_state_t *zv)
2124e7cbe64fSgw {
2125e7cbe64fSgw 	dmu_tx_t *tx;
2126e7cbe64fSgw 	objset_t *os = zv->zv_objset;
2127e7cbe64fSgw 	nvlist_t *nv;
2128e7cbe64fSgw 	int error = 0;
2129afee20e4SGeorge Wilson 	uint64_t checksum, compress, refresrv, vbs, dedup;
21308d265e66SGeorge Wilson 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2131e7cbe64fSgw 
2132b7e50089Smaybee 	/*
2133b7e50089Smaybee 	 * Attempt to restore the zvol back to its pre-dumpified state.
2134b7e50089Smaybee 	 * This is a best-effort attempt as it's possible that not all
2135b7e50089Smaybee 	 * of these properties were initialized during the dumpify process
2136b7e50089Smaybee 	 * (i.e. error during zvol_dump_init).
2137b7e50089Smaybee 	 */
2138b7e50089Smaybee 
2139e7cbe64fSgw 	tx = dmu_tx_create(os);
2140e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2141e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2142e7cbe64fSgw 	if (error) {
2143e7cbe64fSgw 		dmu_tx_abort(tx);
2144e7cbe64fSgw 		return (error);
2145e7cbe64fSgw 	}
2146b7e50089Smaybee 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2147b7e50089Smaybee 	dmu_tx_commit(tx);
2148e7cbe64fSgw 
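	/*
	 * Read back the property values that zvol_dump_init() stashed in
	 * the ZAP and reapply them to the dataset.
	 */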
2149e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2150e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2151e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2152e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2153e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2154e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
215588b7b0f2SMatthew Ahrens 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
215688b7b0f2SMatthew Ahrens 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2157e7cbe64fSgw 
2158e7cbe64fSgw 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2159e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2160e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2161e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2162e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2163e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2164e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
21658d265e66SGeorge Wilson 	if (version >= SPA_VERSION_DEDUP &&
21668d265e66SGeorge Wilson 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
21678d265e66SGeorge Wilson 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
21688d265e66SGeorge Wilson 		(void) nvlist_add_uint64(nv,
21698d265e66SGeorge Wilson 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
21708d265e66SGeorge Wilson 	}
217192241e0bSTom Erickson 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
217292241e0bSTom Erickson 	    nv, NULL);
2173e7cbe64fSgw 	nvlist_free(nv);
2174e7cbe64fSgw 
2175b7e50089Smaybee 	zvol_free_extents(zv);
2176b7e50089Smaybee 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2177b7e50089Smaybee 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2178681d9761SEric Taylor 	/* wait for dmu_free_long_range to actually free the blocks */
2179681d9761SEric Taylor 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2180681d9761SEric Taylor 	tx = dmu_tx_create(os);
2181681d9761SEric Taylor 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2182681d9761SEric Taylor 	error = dmu_tx_assign(tx, TXG_WAIT);
2183681d9761SEric Taylor 	if (error) {
2184681d9761SEric Taylor 		dmu_tx_abort(tx);
2185681d9761SEric Taylor 		return (error);
2186681d9761SEric Taylor 	}
2187b24ab676SJeff Bonwick 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2188b24ab676SJeff Bonwick 		zv->zv_volblocksize = vbs;
2189681d9761SEric Taylor 	dmu_tx_commit(tx);
2190b7e50089Smaybee 
2191e7cbe64fSgw 	return (0);
2192e7cbe64fSgw }
2193