xref: /illumos-gate/usr/src/uts/common/fs/zfs/zvol.c (revision 455e370c)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
22f80ce222SChris Kirby  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23b77b9231SDan McDonald  *
24b77b9231SDan McDonald  * Portions Copyright 2010 Robert Milkowski
25b77b9231SDan McDonald  *
26047c81d3SSaso Kiselkov  * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
27b7edcb94SMatthew Ahrens  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
28c3d26abcSMatthew Ahrens  * Copyright (c) 2014 Integros [integros.com]
29*455e370cSJohn Levon  * Copyright 2019 Joyent, Inc.
30fa9e4066Sahrens  */
31fa9e4066Sahrens 
32fa9e4066Sahrens /*
33fa9e4066Sahrens  * ZFS volume emulation driver.
34fa9e4066Sahrens  *
35fa9e4066Sahrens  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
36fa9e4066Sahrens  * Volumes are accessed through the symbolic links named:
37fa9e4066Sahrens  *
38fa9e4066Sahrens  * /dev/zvol/dsk/<pool_name>/<dataset_name>
39fa9e4066Sahrens  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
40fa9e4066Sahrens  *
41681d9761SEric Taylor  * These links are created by the /dev filesystem (sdev_zvolops.c).
42fa9e4066Sahrens  * Volumes are persistent through reboot.  No user command needs to be
43fa9e4066Sahrens  * run before opening and using a device.
44fa9e4066Sahrens  */
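/*
 * For example, a volume created with "zfs create -V 10G tank/vol" shows up
 * as the block device /dev/zvol/dsk/tank/vol and the raw device
 * /dev/zvol/rdsk/tank/vol, and can then be used like any other disk.
 */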
45fa9e4066Sahrens 
46fa9e4066Sahrens #include <sys/types.h>
47fa9e4066Sahrens #include <sys/param.h>
48fa9e4066Sahrens #include <sys/errno.h>
49fa9e4066Sahrens #include <sys/uio.h>
50fa9e4066Sahrens #include <sys/buf.h>
51fa9e4066Sahrens #include <sys/modctl.h>
52fa9e4066Sahrens #include <sys/open.h>
53fa9e4066Sahrens #include <sys/kmem.h>
54fa9e4066Sahrens #include <sys/conf.h>
55fa9e4066Sahrens #include <sys/cmn_err.h>
56fa9e4066Sahrens #include <sys/stat.h>
57fa9e4066Sahrens #include <sys/zap.h>
58fa9e4066Sahrens #include <sys/spa.h>
59810e43b2SBill Pijewski #include <sys/spa_impl.h>
60fa9e4066Sahrens #include <sys/zio.h>
61e7cbe64fSgw #include <sys/dmu_traverse.h>
62e7cbe64fSgw #include <sys/dnode.h>
63e7cbe64fSgw #include <sys/dsl_dataset.h>
64fa9e4066Sahrens #include <sys/dsl_prop.h>
65fa9e4066Sahrens #include <sys/dkio.h>
66fa9e4066Sahrens #include <sys/efi_partition.h>
67fa9e4066Sahrens #include <sys/byteorder.h>
68fa9e4066Sahrens #include <sys/pathname.h>
69fa9e4066Sahrens #include <sys/ddi.h>
70fa9e4066Sahrens #include <sys/sunddi.h>
71fa9e4066Sahrens #include <sys/crc32.h>
72fa9e4066Sahrens #include <sys/dirent.h>
73fa9e4066Sahrens #include <sys/policy.h>
74fa9e4066Sahrens #include <sys/fs/zfs.h>
75fa9e4066Sahrens #include <sys/zfs_ioctl.h>
76fa9e4066Sahrens #include <sys/mkdev.h>
7722ac5be4Sperrin #include <sys/zil.h>
78c5c6ffa0Smaybee #include <sys/refcount.h>
79c2e6a7d6Sperrin #include <sys/zfs_znode.h>
80c2e6a7d6Sperrin #include <sys/zfs_rlock.h>
81e7cbe64fSgw #include <sys/vdev_disk.h>
82e7cbe64fSgw #include <sys/vdev_impl.h>
83810e43b2SBill Pijewski #include <sys/vdev_raidz.h>
84e7cbe64fSgw #include <sys/zvol.h>
85e7cbe64fSgw #include <sys/dumphdr.h>
861209a471SNeil Perrin #include <sys/zil_impl.h>
8780901aeaSGeorge Wilson #include <sys/dbuf.h>
88810e43b2SBill Pijewski #include <sys/dmu_tx.h>
89810e43b2SBill Pijewski #include <sys/zfeature.h>
90810e43b2SBill Pijewski #include <sys/zio_checksum.h>
92*455e370cSJohn Levon #include <sys/ht.h>
93047c81d3SSaso Kiselkov #include <sys/dkioc_free_util.h>
95fa9e4066Sahrens 
96fa9e4066Sahrens #include "zfs_namecheck.h"
97fa9e4066Sahrens 
98c99e4bdcSChris Kirby void *zfsdev_state;
99503ad85cSMatthew Ahrens static char *zvol_tag = "zvol_tag";
100fa9e4066Sahrens 
101e7cbe64fSgw #define	ZVOL_DUMPSIZE		"dumpsize"
102e7cbe64fSgw 
103fa9e4066Sahrens /*
104c99e4bdcSChris Kirby  * This lock protects the zfsdev_state structure from being modified
105fa9e4066Sahrens  * while it's being used, e.g. an open that comes in before a create
106fa9e4066Sahrens  * finishes.  It also protects temporary opens of the dataset so that,
107fa9e4066Sahrens  * e.g., an open doesn't get a spurious EBUSY.
108fa9e4066Sahrens  */
109c99e4bdcSChris Kirby kmutex_t zfsdev_state_lock;
110fa9e4066Sahrens static uint32_t zvol_minors;
111fa9e4066Sahrens 
112e7cbe64fSgw typedef struct zvol_extent {
11388b7b0f2SMatthew Ahrens 	list_node_t	ze_node;
114e7cbe64fSgw 	dva_t		ze_dva;		/* dva associated with this extent */
11588b7b0f2SMatthew Ahrens 	uint64_t	ze_nblks;	/* number of blocks in extent */
116e7cbe64fSgw } zvol_extent_t;
117e7cbe64fSgw 
118fa9e4066Sahrens /*
119fa9e4066Sahrens  * The in-core state of each volume.
120fa9e4066Sahrens  */
121fa9e4066Sahrens typedef struct zvol_state {
122fa9e4066Sahrens 	char		zv_name[MAXPATHLEN]; /* pool/dataset name */
123fa9e4066Sahrens 	uint64_t	zv_volsize;	/* amount of space we advertise */
12467bd71c6Sperrin 	uint64_t	zv_volblocksize; /* volume block size */
125fa9e4066Sahrens 	minor_t		zv_minor;	/* minor number */
126fa9e4066Sahrens 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
127701f66c4SEric Taylor 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
128fa9e4066Sahrens 	objset_t	*zv_objset;	/* objset handle */
129fa9e4066Sahrens 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
130fa9e4066Sahrens 	uint32_t	zv_total_opens;	/* total open count */
13122ac5be4Sperrin 	zilog_t		*zv_zilog;	/* ZIL handle */
13288b7b0f2SMatthew Ahrens 	list_t		zv_extents;	/* List of extents for dump */
13379315247SMatthew Ahrens 	rangelock_t	zv_rangelock;
1348dfe5547SRichard Yao 	dnode_t		*zv_dn;		/* dnode hold */
135fa9e4066Sahrens } zvol_state_t;
136fa9e4066Sahrens 
137e7cbe64fSgw /*
138e7cbe64fSgw  * zvol specific flags
139e7cbe64fSgw  */
140e7cbe64fSgw #define	ZVOL_RDONLY	0x1
141e7cbe64fSgw #define	ZVOL_DUMPIFIED	0x2
142c7f714e2SEric Taylor #define	ZVOL_EXCL	0x4
143701f66c4SEric Taylor #define	ZVOL_WCE	0x8
144e7cbe64fSgw 
14567bd71c6Sperrin /*
14667bd71c6Sperrin  * zvol maximum transfer in one DMU tx.
14767bd71c6Sperrin  */
14867bd71c6Sperrin int zvol_maxphys = DMU_MAX_ACCESS/2;
14967bd71c6Sperrin 
150893c83baSGeorge Wilson /*
151893c83baSGeorge Wilson  * Toggle unmap functionality.
152893c83baSGeorge Wilson  */
153893c83baSGeorge Wilson boolean_t zvol_unmap_enabled = B_TRUE;
154893c83baSGeorge Wilson 
1551c9272b8SStephen Blinick /*
1561c9272b8SStephen Blinick  * If true, unmaps requested as synchronous are executed synchronously,
1571c9272b8SStephen Blinick  * otherwise all unmaps are asynchronous.
1581c9272b8SStephen Blinick  */
1591c9272b8SStephen Blinick boolean_t zvol_unmap_sync_enabled = B_FALSE;
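/*
 * Like other zvol tunables, these can be overridden at boot via
 * /etc/system, e.g.:
 *
 *	set zfs:zvol_unmap_enabled = 0
 */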
1601c9272b8SStephen Blinick 
16192241e0bSTom Erickson extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
1624445fffbSMatthew Ahrens     nvlist_t *, nvlist_t *);
163681d9761SEric Taylor static int zvol_remove_zv(zvol_state_t *);
1641271e4b1SPrakash Surya static int zvol_get_data(void *arg, lr_write_t *lr, char *buf,
1651271e4b1SPrakash Surya     struct lwb *lwb, zio_t *zio);
166e7cbe64fSgw static int zvol_dumpify(zvol_state_t *zv);
167e7cbe64fSgw static int zvol_dump_fini(zvol_state_t *zv);
168e7cbe64fSgw static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
16967bd71c6Sperrin 
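/*
 * Record a new volume size: update the cached size, refresh the Size and
 * Nblocks properties on the minor node, and tell specfs to invalidate any
 * cached size for the block and character devices.
 */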
170fa9e4066Sahrens static void
171c61ea566SGeorge Wilson zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
172fa9e4066Sahrens {
173c61ea566SGeorge Wilson 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
174fa9e4066Sahrens 
175c61ea566SGeorge Wilson 	zv->zv_volsize = volsize;
176fa9e4066Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
177681d9761SEric Taylor 	    "Size", volsize) == DDI_SUCCESS);
178fa9e4066Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
179681d9761SEric Taylor 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
180e7cbe64fSgw 
181e7cbe64fSgw 	/* Notify specfs to invalidate the cached size */
182e7cbe64fSgw 	spec_size_invalidate(dev, VBLK);
183e7cbe64fSgw 	spec_size_invalidate(dev, VCHR);
184fa9e4066Sahrens }
185fa9e4066Sahrens 
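/*
 * Sanity-check a proposed volume size: it must be a non-zero multiple of
 * the block size (and addressable through specfs on 32-bit kernels).
 */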
186fa9e4066Sahrens int
187e9dbad6fSeschrock zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
188fa9e4066Sahrens {
189e9dbad6fSeschrock 	if (volsize == 0)
190be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
191fa9e4066Sahrens 
192e9dbad6fSeschrock 	if (volsize % blocksize != 0)
193be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
1945c5460e9Seschrock 
195fa9e4066Sahrens #ifdef _ILP32
196e9dbad6fSeschrock 	if (volsize - 1 > SPEC_MAXOFFSET_T)
197be6fd75aSMatthew Ahrens 		return (SET_ERROR(EOVERFLOW));
198fa9e4066Sahrens #endif
199fa9e4066Sahrens 	return (0);
200fa9e4066Sahrens }
201fa9e4066Sahrens 
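/*
 * Sanity-check a proposed volume block size: a power of two between
 * SPA_MINBLOCKSIZE (512) and SPA_OLD_MAXBLOCKSIZE (128K).
 */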
202fa9e4066Sahrens int
203e9dbad6fSeschrock zvol_check_volblocksize(uint64_t volblocksize)
204fa9e4066Sahrens {
205e9dbad6fSeschrock 	if (volblocksize < SPA_MINBLOCKSIZE ||
206b5152584SMatthew Ahrens 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
207e9dbad6fSeschrock 	    !ISP2(volblocksize))
208be6fd75aSMatthew Ahrens 		return (SET_ERROR(EDOM));
209fa9e4066Sahrens 
210fa9e4066Sahrens 	return (0);
211fa9e4066Sahrens }
212fa9e4066Sahrens 
213fa9e4066Sahrens int
214a2eea2e1Sahrens zvol_get_stats(objset_t *os, nvlist_t *nv)
215fa9e4066Sahrens {
216fa9e4066Sahrens 	int error;
217fa9e4066Sahrens 	dmu_object_info_t doi;
218a2eea2e1Sahrens 	uint64_t val;
219fa9e4066Sahrens 
220a2eea2e1Sahrens 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
221fa9e4066Sahrens 	if (error)
222fa9e4066Sahrens 		return (error);
223fa9e4066Sahrens 
224a2eea2e1Sahrens 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
225a2eea2e1Sahrens 
226fa9e4066Sahrens 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
227fa9e4066Sahrens 
228a2eea2e1Sahrens 	if (error == 0) {
229a2eea2e1Sahrens 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
230a2eea2e1Sahrens 		    doi.doi_data_block_size);
231a2eea2e1Sahrens 	}
232fa9e4066Sahrens 
233fa9e4066Sahrens 	return (error);
234fa9e4066Sahrens }
235fa9e4066Sahrens 
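/*
 * Walk the allocated minors looking for the named volume; returns NULL if
 * no minor node exists for it.  The caller must hold zfsdev_state_lock.
 */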
236fa9e4066Sahrens static zvol_state_t *
237e9dbad6fSeschrock zvol_minor_lookup(const char *name)
238fa9e4066Sahrens {
239fa9e4066Sahrens 	minor_t minor;
240fa9e4066Sahrens 	zvol_state_t *zv;
241fa9e4066Sahrens 
242c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
243fa9e4066Sahrens 
244c99e4bdcSChris Kirby 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
245c99e4bdcSChris Kirby 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
246fa9e4066Sahrens 		if (zv == NULL)
247fa9e4066Sahrens 			continue;
248fa9e4066Sahrens 		if (strcmp(zv->zv_name, name) == 0)
249f80ce222SChris Kirby 			return (zv);
250fa9e4066Sahrens 	}
251fa9e4066Sahrens 
252f80ce222SChris Kirby 	return (NULL);
253fa9e4066Sahrens }
254fa9e4066Sahrens 
255e7cbe64fSgw /* extent mapping arg */
256e7cbe64fSgw struct maparg {
25788b7b0f2SMatthew Ahrens 	zvol_state_t	*ma_zv;
25888b7b0f2SMatthew Ahrens 	uint64_t	ma_blks;
259e7cbe64fSgw };
260e7cbe64fSgw 
261e7cbe64fSgw /*ARGSUSED*/
262e7cbe64fSgw static int
2631b912ec7SGeorge Wilson zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2647802d7bfSMatthew Ahrens     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
265e7cbe64fSgw {
26688b7b0f2SMatthew Ahrens 	struct maparg *ma = arg;
26788b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
26888b7b0f2SMatthew Ahrens 	int bs = ma->ma_zv->zv_volblocksize;
269e7cbe64fSgw 
270a2cdcdd2SPaul Dagnelie 	if (bp == NULL || BP_IS_HOLE(bp) ||
27143466aaeSMax Grossman 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
27288b7b0f2SMatthew Ahrens 		return (0);
273e7cbe64fSgw 
2745d7b4d43SMatthew Ahrens 	VERIFY(!BP_IS_EMBEDDED(bp));
2755d7b4d43SMatthew Ahrens 
27688b7b0f2SMatthew Ahrens 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
27788b7b0f2SMatthew Ahrens 	ma->ma_blks++;
278e7cbe64fSgw 
27988b7b0f2SMatthew Ahrens 	/* Abort immediately if we have encountered gang blocks */
28088b7b0f2SMatthew Ahrens 	if (BP_IS_GANG(bp))
281be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFRAGS));
282e7cbe64fSgw 
28388b7b0f2SMatthew Ahrens 	/*
28488b7b0f2SMatthew Ahrens 	 * See if the block is at the end of the previous extent.
28588b7b0f2SMatthew Ahrens 	 */
28688b7b0f2SMatthew Ahrens 	ze = list_tail(&ma->ma_zv->zv_extents);
28788b7b0f2SMatthew Ahrens 	if (ze &&
28888b7b0f2SMatthew Ahrens 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
28988b7b0f2SMatthew Ahrens 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
29088b7b0f2SMatthew Ahrens 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
29188b7b0f2SMatthew Ahrens 		ze->ze_nblks++;
29288b7b0f2SMatthew Ahrens 		return (0);
293e7cbe64fSgw 	}
294e7cbe64fSgw 
29588b7b0f2SMatthew Ahrens 	dprintf_bp(bp, "%s", "next blkptr:");
296e7cbe64fSgw 
29788b7b0f2SMatthew Ahrens 	/* start a new extent */
29888b7b0f2SMatthew Ahrens 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
29988b7b0f2SMatthew Ahrens 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
30088b7b0f2SMatthew Ahrens 	ze->ze_nblks = 1;
30188b7b0f2SMatthew Ahrens 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
30288b7b0f2SMatthew Ahrens 	return (0);
30388b7b0f2SMatthew Ahrens }
304e7cbe64fSgw 
30588b7b0f2SMatthew Ahrens static void
30688b7b0f2SMatthew Ahrens zvol_free_extents(zvol_state_t *zv)
30788b7b0f2SMatthew Ahrens {
30888b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
309e7cbe64fSgw 
31088b7b0f2SMatthew Ahrens 	while ((ze = list_head(&zv->zv_extents)) != NULL) {
31188b7b0f2SMatthew Ahrens 		list_remove(&zv->zv_extents, ze);
31288b7b0f2SMatthew Ahrens 		kmem_free(ze, sizeof (zvol_extent_t));
313e7cbe64fSgw 	}
31488b7b0f2SMatthew Ahrens }
315e7cbe64fSgw 
31688b7b0f2SMatthew Ahrens static int
31788b7b0f2SMatthew Ahrens zvol_get_lbas(zvol_state_t *zv)
31888b7b0f2SMatthew Ahrens {
3193adc9019SEric Taylor 	objset_t *os = zv->zv_objset;
32088b7b0f2SMatthew Ahrens 	struct maparg	ma;
32188b7b0f2SMatthew Ahrens 	int		err;
32288b7b0f2SMatthew Ahrens 
32388b7b0f2SMatthew Ahrens 	ma.ma_zv = zv;
32488b7b0f2SMatthew Ahrens 	ma.ma_blks = 0;
32588b7b0f2SMatthew Ahrens 	zvol_free_extents(zv);
32688b7b0f2SMatthew Ahrens 
3273adc9019SEric Taylor 	/* commit any in-flight changes before traversing the dataset */
3283adc9019SEric Taylor 	txg_wait_synced(dmu_objset_pool(os), 0);
3293adc9019SEric Taylor 	err = traverse_dataset(dmu_objset_ds(os), 0,
33088b7b0f2SMatthew Ahrens 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
33188b7b0f2SMatthew Ahrens 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
33288b7b0f2SMatthew Ahrens 		zvol_free_extents(zv);
33388b7b0f2SMatthew Ahrens 		return (err ? err : EIO);
334e7cbe64fSgw 	}
33588b7b0f2SMatthew Ahrens 
336e7cbe64fSgw 	return (0);
337e7cbe64fSgw }
338e7cbe64fSgw 
339ecd6cf80Smarks /* ARGSUSED */
340fa9e4066Sahrens void
341ecd6cf80Smarks zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
342fa9e4066Sahrens {
343da6c28aaSamw 	zfs_creat_t *zct = arg;
344da6c28aaSamw 	nvlist_t *nvprops = zct->zct_props;
345fa9e4066Sahrens 	int error;
346e9dbad6fSeschrock 	uint64_t volblocksize, volsize;
347fa9e4066Sahrens 
348ecd6cf80Smarks 	VERIFY(nvlist_lookup_uint64(nvprops,
349e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
350ecd6cf80Smarks 	if (nvlist_lookup_uint64(nvprops,
351e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
352e9dbad6fSeschrock 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
353e9dbad6fSeschrock 
354e9dbad6fSeschrock 	/*
355e7cbe64fSgw 	 * These properties must be removed from the list so the generic
356e9dbad6fSeschrock 	 * property setting step won't apply to them.
357e9dbad6fSeschrock 	 */
358ecd6cf80Smarks 	VERIFY(nvlist_remove_all(nvprops,
359e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
360ecd6cf80Smarks 	(void) nvlist_remove_all(nvprops,
361e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
362e9dbad6fSeschrock 
363e9dbad6fSeschrock 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
364fa9e4066Sahrens 	    DMU_OT_NONE, 0, tx);
365fa9e4066Sahrens 	ASSERT(error == 0);
366fa9e4066Sahrens 
367fa9e4066Sahrens 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
368fa9e4066Sahrens 	    DMU_OT_NONE, 0, tx);
369fa9e4066Sahrens 	ASSERT(error == 0);
370fa9e4066Sahrens 
371e9dbad6fSeschrock 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
372fa9e4066Sahrens 	ASSERT(error == 0);
373fa9e4066Sahrens }
374fa9e4066Sahrens 
375b77b9231SDan McDonald /*
376b77b9231SDan McDonald  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
377b77b9231SDan McDonald  * implement DKIOCFREE/free-long-range.
378b77b9231SDan McDonald  */
379b77b9231SDan McDonald static int
3803f7978d0SAlan Somers zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
381b77b9231SDan McDonald {
3823f7978d0SAlan Somers 	zvol_state_t *zv = arg1;
3833f7978d0SAlan Somers 	lr_truncate_t *lr = arg2;
384b77b9231SDan McDonald 	uint64_t offset, length;
385b77b9231SDan McDonald 
386b77b9231SDan McDonald 	if (byteswap)
387b77b9231SDan McDonald 		byteswap_uint64_array(lr, sizeof (*lr));
388b77b9231SDan McDonald 
389b77b9231SDan McDonald 	offset = lr->lr_offset;
390b77b9231SDan McDonald 	length = lr->lr_length;
391b77b9231SDan McDonald 
392b77b9231SDan McDonald 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
393b77b9231SDan McDonald }
394b77b9231SDan McDonald 
39522ac5be4Sperrin /*
39622ac5be4Sperrin  * Replay a TX_WRITE ZIL transaction that didn't get committed
39722ac5be4Sperrin  * after a system failure
39822ac5be4Sperrin  */
39922ac5be4Sperrin static int
4003f7978d0SAlan Somers zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
40122ac5be4Sperrin {
4023f7978d0SAlan Somers 	zvol_state_t *zv = arg1;
4033f7978d0SAlan Somers 	lr_write_t *lr = arg2;
40422ac5be4Sperrin 	objset_t *os = zv->zv_objset;
40522ac5be4Sperrin 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
406b24ab676SJeff Bonwick 	uint64_t offset, length;
40722ac5be4Sperrin 	dmu_tx_t *tx;
40822ac5be4Sperrin 	int error;
40922ac5be4Sperrin 
41022ac5be4Sperrin 	if (byteswap)
41122ac5be4Sperrin 		byteswap_uint64_array(lr, sizeof (*lr));
41222ac5be4Sperrin 
413b24ab676SJeff Bonwick 	offset = lr->lr_offset;
414b24ab676SJeff Bonwick 	length = lr->lr_length;
415b24ab676SJeff Bonwick 
416b24ab676SJeff Bonwick 	/* If it's a dmu_sync() block, write the whole block */
417b24ab676SJeff Bonwick 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
418b24ab676SJeff Bonwick 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
419b24ab676SJeff Bonwick 		if (length < blocksize) {
420b24ab676SJeff Bonwick 			offset -= offset % blocksize;
421b24ab676SJeff Bonwick 			length = blocksize;
422b24ab676SJeff Bonwick 		}
423b24ab676SJeff Bonwick 	}
424975c32a0SNeil Perrin 
42522ac5be4Sperrin 	tx = dmu_tx_create(os);
426b24ab676SJeff Bonwick 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
4271209a471SNeil Perrin 	error = dmu_tx_assign(tx, TXG_WAIT);
42822ac5be4Sperrin 	if (error) {
42922ac5be4Sperrin 		dmu_tx_abort(tx);
43022ac5be4Sperrin 	} else {
431b24ab676SJeff Bonwick 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
43222ac5be4Sperrin 		dmu_tx_commit(tx);
43322ac5be4Sperrin 	}
43422ac5be4Sperrin 
43522ac5be4Sperrin 	return (error);
43622ac5be4Sperrin }
43722ac5be4Sperrin 
43822ac5be4Sperrin /* ARGSUSED */
43922ac5be4Sperrin static int
4403f7978d0SAlan Somers zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
44122ac5be4Sperrin {
442be6fd75aSMatthew Ahrens 	return (SET_ERROR(ENOTSUP));
44322ac5be4Sperrin }
44422ac5be4Sperrin 
44522ac5be4Sperrin /*
44622ac5be4Sperrin  * Callback vectors for replaying records.
447b77b9231SDan McDonald  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
44822ac5be4Sperrin  */
44922ac5be4Sperrin zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
45022ac5be4Sperrin 	zvol_replay_err,	/* 0 no such transaction type */
45122ac5be4Sperrin 	zvol_replay_err,	/* TX_CREATE */
45222ac5be4Sperrin 	zvol_replay_err,	/* TX_MKDIR */
45322ac5be4Sperrin 	zvol_replay_err,	/* TX_MKXATTR */
45422ac5be4Sperrin 	zvol_replay_err,	/* TX_SYMLINK */
45522ac5be4Sperrin 	zvol_replay_err,	/* TX_REMOVE */
45622ac5be4Sperrin 	zvol_replay_err,	/* TX_RMDIR */
45722ac5be4Sperrin 	zvol_replay_err,	/* TX_LINK */
45822ac5be4Sperrin 	zvol_replay_err,	/* TX_RENAME */
45922ac5be4Sperrin 	zvol_replay_write,	/* TX_WRITE */
460b77b9231SDan McDonald 	zvol_replay_truncate,	/* TX_TRUNCATE */
46122ac5be4Sperrin 	zvol_replay_err,	/* TX_SETATTR */
46222ac5be4Sperrin 	zvol_replay_err,	/* TX_ACL */
463975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ACL */
464975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ATTR */
465975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
466975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ACL */
467975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ATTR */
468975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
469975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_WRITE2 */
47022ac5be4Sperrin };
47122ac5be4Sperrin 
472681d9761SEric Taylor int
473681d9761SEric Taylor zvol_name2minor(const char *name, minor_t *minor)
474681d9761SEric Taylor {
475681d9761SEric Taylor 	zvol_state_t *zv;
476681d9761SEric Taylor 
477c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
478681d9761SEric Taylor 	zv = zvol_minor_lookup(name);
479681d9761SEric Taylor 	if (minor && zv)
480681d9761SEric Taylor 		*minor = zv->zv_minor;
481c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
482681d9761SEric Taylor 	return (zv ? 0 : -1);
483681d9761SEric Taylor }
484681d9761SEric Taylor 
485e7cbe64fSgw /*
486e7cbe64fSgw  * Create a minor node (plus a whole lot more) for the specified volume.
487fa9e4066Sahrens  */
488fa9e4066Sahrens int
489681d9761SEric Taylor zvol_create_minor(const char *name)
490fa9e4066Sahrens {
491c99e4bdcSChris Kirby 	zfs_soft_state_t *zs;
492fa9e4066Sahrens 	zvol_state_t *zv;
493fa9e4066Sahrens 	objset_t *os;
49467bd71c6Sperrin 	dmu_object_info_t doi;
495fa9e4066Sahrens 	minor_t minor = 0;
496fa9e4066Sahrens 	char chrbuf[30], blkbuf[30];
497fa9e4066Sahrens 	int error;
498fa9e4066Sahrens 
499c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
500fa9e4066Sahrens 
5011195e687SMark J Musante 	if (zvol_minor_lookup(name) != NULL) {
502c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
503be6fd75aSMatthew Ahrens 		return (SET_ERROR(EEXIST));
504fa9e4066Sahrens 	}
505fa9e4066Sahrens 
506503ad85cSMatthew Ahrens 	/* lie and say we're read-only */
5076e0cbcaaSMatthew Ahrens 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
508fa9e4066Sahrens 
509fa9e4066Sahrens 	if (error) {
510c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
511fa9e4066Sahrens 		return (error);
512fa9e4066Sahrens 	}
513fa9e4066Sahrens 
514c99e4bdcSChris Kirby 	if ((minor = zfsdev_minor_alloc()) == 0) {
5156e0cbcaaSMatthew Ahrens 		dmu_objset_disown(os, FTAG);
516c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
517be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
518fa9e4066Sahrens 	}
519fa9e4066Sahrens 
520c99e4bdcSChris Kirby 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
5216e0cbcaaSMatthew Ahrens 		dmu_objset_disown(os, FTAG);
522c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
523be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
524fa9e4066Sahrens 	}
525e9dbad6fSeschrock 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
526e9dbad6fSeschrock 	    (char *)name);
527fa9e4066Sahrens 
528681d9761SEric Taylor 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
529fa9e4066Sahrens 
530fa9e4066Sahrens 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
531fa9e4066Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
532c99e4bdcSChris Kirby 		ddi_soft_state_free(zfsdev_state, minor);
5336e0cbcaaSMatthew Ahrens 		dmu_objset_disown(os, FTAG);
534c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
535be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
536fa9e4066Sahrens 	}
537fa9e4066Sahrens 
538681d9761SEric Taylor 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
539fa9e4066Sahrens 
540fa9e4066Sahrens 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
541fa9e4066Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
542fa9e4066Sahrens 		ddi_remove_minor_node(zfs_dip, chrbuf);
543c99e4bdcSChris Kirby 		ddi_soft_state_free(zfsdev_state, minor);
5446e0cbcaaSMatthew Ahrens 		dmu_objset_disown(os, FTAG);
545c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
546be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
547fa9e4066Sahrens 	}
548fa9e4066Sahrens 
549c99e4bdcSChris Kirby 	zs = ddi_get_soft_state(zfsdev_state, minor);
550c99e4bdcSChris Kirby 	zs->zss_type = ZSST_ZVOL;
551c99e4bdcSChris Kirby 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
552681d9761SEric Taylor 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
553fa9e4066Sahrens 	zv->zv_min_bs = DEV_BSHIFT;
554fa9e4066Sahrens 	zv->zv_minor = minor;
555fa9e4066Sahrens 	zv->zv_objset = os;
556f9af39baSGeorge Wilson 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
557681d9761SEric Taylor 		zv->zv_flags |= ZVOL_RDONLY;
55879315247SMatthew Ahrens 	rangelock_init(&zv->zv_rangelock, NULL, NULL);
55988b7b0f2SMatthew Ahrens 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
56088b7b0f2SMatthew Ahrens 	    offsetof(zvol_extent_t, ze_node));
56167bd71c6Sperrin 	/* get and cache the blocksize */
56267bd71c6Sperrin 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
56367bd71c6Sperrin 	ASSERT(error == 0);
56467bd71c6Sperrin 	zv->zv_volblocksize = doi.doi_data_block_size;
56522ac5be4Sperrin 
566f9af39baSGeorge Wilson 	if (spa_writeable(dmu_objset_spa(os))) {
567f9af39baSGeorge Wilson 		if (zil_replay_disable)
568f9af39baSGeorge Wilson 			zil_destroy(dmu_objset_zil(os), B_FALSE);
569f9af39baSGeorge Wilson 		else
570f9af39baSGeorge Wilson 			zil_replay(os, zv, zvol_replay_vector);
571f9af39baSGeorge Wilson 	}
5726e0cbcaaSMatthew Ahrens 	dmu_objset_disown(os, FTAG);
573681d9761SEric Taylor 	zv->zv_objset = NULL;
574fa9e4066Sahrens 
575fa9e4066Sahrens 	zvol_minors++;
576fa9e4066Sahrens 
577c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
578fa9e4066Sahrens 
579fa9e4066Sahrens 	return (0);
580fa9e4066Sahrens }
581fa9e4066Sahrens 
582fa9e4066Sahrens /*
583fa9e4066Sahrens  * Remove minor node for the specified volume.
584fa9e4066Sahrens  */
585681d9761SEric Taylor static int
586681d9761SEric Taylor zvol_remove_zv(zvol_state_t *zv)
587681d9761SEric Taylor {
588681d9761SEric Taylor 	char nmbuf[20];
589c99e4bdcSChris Kirby 	minor_t minor = zv->zv_minor;
590681d9761SEric Taylor 
591c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
592681d9761SEric Taylor 	if (zv->zv_total_opens != 0)
593be6fd75aSMatthew Ahrens 		return (SET_ERROR(EBUSY));
594681d9761SEric Taylor 
595c99e4bdcSChris Kirby 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
596681d9761SEric Taylor 	ddi_remove_minor_node(zfs_dip, nmbuf);
597681d9761SEric Taylor 
598c99e4bdcSChris Kirby 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
599681d9761SEric Taylor 	ddi_remove_minor_node(zfs_dip, nmbuf);
600681d9761SEric Taylor 
60179315247SMatthew Ahrens 	rangelock_fini(&zv->zv_rangelock);
602681d9761SEric Taylor 
603c99e4bdcSChris Kirby 	kmem_free(zv, sizeof (zvol_state_t));
604c99e4bdcSChris Kirby 
605c99e4bdcSChris Kirby 	ddi_soft_state_free(zfsdev_state, minor);
606681d9761SEric Taylor 
607681d9761SEric Taylor 	zvol_minors--;
608681d9761SEric Taylor 	return (0);
609681d9761SEric Taylor }
610681d9761SEric Taylor 
611fa9e4066Sahrens int
612e9dbad6fSeschrock zvol_remove_minor(const char *name)
613fa9e4066Sahrens {
614fa9e4066Sahrens 	zvol_state_t *zv;
615681d9761SEric Taylor 	int rc;
616fa9e4066Sahrens 
617c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
618e9dbad6fSeschrock 	if ((zv = zvol_minor_lookup(name)) == NULL) {
619c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
620be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
621fa9e4066Sahrens 	}
622681d9761SEric Taylor 	rc = zvol_remove_zv(zv);
623c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
624681d9761SEric Taylor 	return (rc);
625681d9761SEric Taylor }
626fa9e4066Sahrens 
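/*
 * Transition from zero to one opens: own the objset, look up and cache the
 * volume size, hold the dnode, open the ZIL, and recompute the read-only
 * flag from the dataset properties.
 */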
627681d9761SEric Taylor int
628681d9761SEric Taylor zvol_first_open(zvol_state_t *zv)
629681d9761SEric Taylor {
630681d9761SEric Taylor 	objset_t *os;
631681d9761SEric Taylor 	uint64_t volsize;
632681d9761SEric Taylor 	int error;
633681d9761SEric Taylor 	uint64_t readonly;
634fa9e4066Sahrens 
635681d9761SEric Taylor 	/* lie and say we're read-only */
636681d9761SEric Taylor 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
637681d9761SEric Taylor 	    zvol_tag, &os);
638681d9761SEric Taylor 	if (error)
639681d9761SEric Taylor 		return (error);
640fa9e4066Sahrens 
641c61ea566SGeorge Wilson 	zv->zv_objset = os;
642681d9761SEric Taylor 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
643681d9761SEric Taylor 	if (error) {
644681d9761SEric Taylor 		ASSERT(error == 0);
645681d9761SEric Taylor 		dmu_objset_disown(os, zvol_tag);
646681d9761SEric Taylor 		return (error);
647681d9761SEric Taylor 	}
648c61ea566SGeorge Wilson 
6498dfe5547SRichard Yao 	error = dnode_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dn);
65094d1a210STim Haley 	if (error) {
65194d1a210STim Haley 		dmu_objset_disown(os, zvol_tag);
65294d1a210STim Haley 		return (error);
65394d1a210STim Haley 	}
654c61ea566SGeorge Wilson 
655c61ea566SGeorge Wilson 	zvol_size_changed(zv, volsize);
656681d9761SEric Taylor 	zv->zv_zilog = zil_open(os, zvol_get_data);
657fa9e4066Sahrens 
658681d9761SEric Taylor 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
659681d9761SEric Taylor 	    NULL) == 0);
660f9af39baSGeorge Wilson 	if (readonly || dmu_objset_is_snapshot(os) ||
661f9af39baSGeorge Wilson 	    !spa_writeable(dmu_objset_spa(os)))
662681d9761SEric Taylor 		zv->zv_flags |= ZVOL_RDONLY;
663681d9761SEric Taylor 	else
664681d9761SEric Taylor 		zv->zv_flags &= ~ZVOL_RDONLY;
665681d9761SEric Taylor 	return (error);
666681d9761SEric Taylor }
667fa9e4066Sahrens 
668681d9761SEric Taylor void
669681d9761SEric Taylor zvol_last_close(zvol_state_t *zv)
670681d9761SEric Taylor {
67122ac5be4Sperrin 	zil_close(zv->zv_zilog);
67222ac5be4Sperrin 	zv->zv_zilog = NULL;
6732e2c1355SMatthew Ahrens 
6748dfe5547SRichard Yao 	dnode_rele(zv->zv_dn, zvol_tag);
6758dfe5547SRichard Yao 	zv->zv_dn = NULL;
6762e2c1355SMatthew Ahrens 
6772e2c1355SMatthew Ahrens 	/*
6782e2c1355SMatthew Ahrens 	 * Evict cached data
6792e2c1355SMatthew Ahrens 	 */
6802e2c1355SMatthew Ahrens 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
6812e2c1355SMatthew Ahrens 	    !(zv->zv_flags & ZVOL_RDONLY))
6822e2c1355SMatthew Ahrens 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
6833b2aab18SMatthew Ahrens 	dmu_objset_evict_dbufs(zv->zv_objset);
6842e2c1355SMatthew Ahrens 
685503ad85cSMatthew Ahrens 	dmu_objset_disown(zv->zv_objset, zvol_tag);
686fa9e4066Sahrens 	zv->zv_objset = NULL;
687fa9e4066Sahrens }
688fa9e4066Sahrens 
689e7cbe64fSgw int
690e7cbe64fSgw zvol_prealloc(zvol_state_t *zv)
691e7cbe64fSgw {
692e7cbe64fSgw 	objset_t *os = zv->zv_objset;
693e7cbe64fSgw 	dmu_tx_t *tx;
694e7cbe64fSgw 	uint64_t refd, avail, usedobjs, availobjs;
695e7cbe64fSgw 	uint64_t resid = zv->zv_volsize;
696e7cbe64fSgw 	uint64_t off = 0;
697e7cbe64fSgw 
698e7cbe64fSgw 	/* Check the space usage before attempting to allocate the space */
699e7cbe64fSgw 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
700e7cbe64fSgw 	if (avail < zv->zv_volsize)
701be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENOSPC));
702e7cbe64fSgw 
703e7cbe64fSgw 	/* Free old extents if they exist */
704e7cbe64fSgw 	zvol_free_extents(zv);
705e7cbe64fSgw 
706e7cbe64fSgw 	while (resid != 0) {
707e7cbe64fSgw 		int error;
708b5152584SMatthew Ahrens 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
709e7cbe64fSgw 
710e7cbe64fSgw 		tx = dmu_tx_create(os);
711e7cbe64fSgw 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
712e7cbe64fSgw 		error = dmu_tx_assign(tx, TXG_WAIT);
713e7cbe64fSgw 		if (error) {
714e7cbe64fSgw 			dmu_tx_abort(tx);
715cdb0ab79Smaybee 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
716e7cbe64fSgw 			return (error);
717e7cbe64fSgw 		}
71882c9918fSTim Haley 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
719e7cbe64fSgw 		dmu_tx_commit(tx);
720e7cbe64fSgw 		off += bytes;
721e7cbe64fSgw 		resid -= bytes;
722e7cbe64fSgw 	}
723e7cbe64fSgw 	txg_wait_synced(dmu_objset_pool(os), 0);
724e7cbe64fSgw 
725e7cbe64fSgw 	return (0);
726e7cbe64fSgw }
727e7cbe64fSgw 
7283b2aab18SMatthew Ahrens static int
729681d9761SEric Taylor zvol_update_volsize(objset_t *os, uint64_t volsize)
730e7cbe64fSgw {
731e7cbe64fSgw 	dmu_tx_t *tx;
732e7cbe64fSgw 	int error;
733e7cbe64fSgw 
734c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
735e7cbe64fSgw 
736681d9761SEric Taylor 	tx = dmu_tx_create(os);
737e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
7384bb73804SMatthew Ahrens 	dmu_tx_mark_netfree(tx);
739e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
740e7cbe64fSgw 	if (error) {
741e7cbe64fSgw 		dmu_tx_abort(tx);
742e7cbe64fSgw 		return (error);
743e7cbe64fSgw 	}
744e7cbe64fSgw 
745681d9761SEric Taylor 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
746e7cbe64fSgw 	    &volsize, tx);
747e7cbe64fSgw 	dmu_tx_commit(tx);
748e7cbe64fSgw 
749e7cbe64fSgw 	if (error == 0)
750681d9761SEric Taylor 		error = dmu_free_long_range(os,
751cdb0ab79Smaybee 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
752681d9761SEric Taylor 	return (error);
753681d9761SEric Taylor }
754e7cbe64fSgw 
755681d9761SEric Taylor void
756681d9761SEric Taylor zvol_remove_minors(const char *name)
757681d9761SEric Taylor {
758681d9761SEric Taylor 	zvol_state_t *zv;
759681d9761SEric Taylor 	char *namebuf;
760681d9761SEric Taylor 	minor_t minor;
761681d9761SEric Taylor 
762681d9761SEric Taylor 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
763681d9761SEric Taylor 	(void) strncpy(namebuf, name, strlen(name));
764681d9761SEric Taylor 	(void) strcat(namebuf, "/");
765c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
766c99e4bdcSChris Kirby 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
767681d9761SEric Taylor 
768c99e4bdcSChris Kirby 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
769681d9761SEric Taylor 		if (zv == NULL)
770681d9761SEric Taylor 			continue;
771681d9761SEric Taylor 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
772681d9761SEric Taylor 			(void) zvol_remove_zv(zv);
773e7cbe64fSgw 	}
774681d9761SEric Taylor 	kmem_free(namebuf, strlen(name) + 2);
775681d9761SEric Taylor 
776c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
777e7cbe64fSgw }
778e7cbe64fSgw 
779c61ea566SGeorge Wilson static int
7803b2aab18SMatthew Ahrens zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
781fa9e4066Sahrens {
782e7cbe64fSgw 	uint64_t old_volsize = 0ULL;
7833b2aab18SMatthew Ahrens 	int error = 0;
784fa9e4066Sahrens 
785c61ea566SGeorge Wilson 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
786c61ea566SGeorge Wilson 
787e7cbe64fSgw 	/*
788e7cbe64fSgw 	 * Reinitialize the dump area to the new size. If we
789681d9761SEric Taylor 	 * fail to resize the dump area, restore it to its
790c61ea566SGeorge Wilson 	 * original size.  We must set the new volsize prior
791c61ea566SGeorge Wilson 	 * to calling dumpvp_resize() to ensure that the devices'
792c61ea566SGeorge Wilson 	 * size(9P) is not visible to the dump subsystem.
793e7cbe64fSgw 	 */
7943b2aab18SMatthew Ahrens 	old_volsize = zv->zv_volsize;
7953b2aab18SMatthew Ahrens 	zvol_size_changed(zv, volsize);
7963b2aab18SMatthew Ahrens 
7973b2aab18SMatthew Ahrens 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
7983b2aab18SMatthew Ahrens 		if ((error = zvol_dumpify(zv)) != 0 ||
7993b2aab18SMatthew Ahrens 		    (error = dumpvp_resize()) != 0) {
8003b2aab18SMatthew Ahrens 			int dumpify_error;
8013b2aab18SMatthew Ahrens 
8023b2aab18SMatthew Ahrens 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
8033b2aab18SMatthew Ahrens 			zvol_size_changed(zv, old_volsize);
8043b2aab18SMatthew Ahrens 			dumpify_error = zvol_dumpify(zv);
8053b2aab18SMatthew Ahrens 			error = dumpify_error ? dumpify_error : error;
806681d9761SEric Taylor 		}
807fa9e4066Sahrens 	}
808fa9e4066Sahrens 
809573ca77eSGeorge Wilson 	/*
810573ca77eSGeorge Wilson 	 * Generate a LUN expansion event.
811573ca77eSGeorge Wilson 	 */
8123b2aab18SMatthew Ahrens 	if (error == 0) {
813573ca77eSGeorge Wilson 		sysevent_id_t eid;
814573ca77eSGeorge Wilson 		nvlist_t *attr;
815573ca77eSGeorge Wilson 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
816573ca77eSGeorge Wilson 
817681d9761SEric Taylor 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
818573ca77eSGeorge Wilson 		    zv->zv_minor);
819573ca77eSGeorge Wilson 
820573ca77eSGeorge Wilson 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
821573ca77eSGeorge Wilson 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
822573ca77eSGeorge Wilson 
823573ca77eSGeorge Wilson 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
824573ca77eSGeorge Wilson 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
825573ca77eSGeorge Wilson 
826573ca77eSGeorge Wilson 		nvlist_free(attr);
827573ca77eSGeorge Wilson 		kmem_free(physpath, MAXPATHLEN);
828573ca77eSGeorge Wilson 	}
829c61ea566SGeorge Wilson 	return (error);
830c61ea566SGeorge Wilson }
831573ca77eSGeorge Wilson 
832c61ea566SGeorge Wilson int
833c61ea566SGeorge Wilson zvol_set_volsize(const char *name, uint64_t volsize)
834c61ea566SGeorge Wilson {
835c61ea566SGeorge Wilson 	zvol_state_t *zv = NULL;
836c61ea566SGeorge Wilson 	objset_t *os;
837c61ea566SGeorge Wilson 	int error;
838c61ea566SGeorge Wilson 	dmu_object_info_t doi;
839c61ea566SGeorge Wilson 	uint64_t readonly;
8403b2aab18SMatthew Ahrens 	boolean_t owned = B_FALSE;
8413b2aab18SMatthew Ahrens 
8423b2aab18SMatthew Ahrens 	error = dsl_prop_get_integer(name,
8433b2aab18SMatthew Ahrens 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
8443b2aab18SMatthew Ahrens 	if (error != 0)
8453b2aab18SMatthew Ahrens 		return (error);
8463b2aab18SMatthew Ahrens 	if (readonly)
847be6fd75aSMatthew Ahrens 		return (SET_ERROR(EROFS));
848c61ea566SGeorge Wilson 
849c61ea566SGeorge Wilson 	mutex_enter(&zfsdev_state_lock);
850c61ea566SGeorge Wilson 	zv = zvol_minor_lookup(name);
8513b2aab18SMatthew Ahrens 
8523b2aab18SMatthew Ahrens 	if (zv == NULL || zv->zv_objset == NULL) {
8533b2aab18SMatthew Ahrens 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
8543b2aab18SMatthew Ahrens 		    FTAG, &os)) != 0) {
8553b2aab18SMatthew Ahrens 			mutex_exit(&zfsdev_state_lock);
8563b2aab18SMatthew Ahrens 			return (error);
8573b2aab18SMatthew Ahrens 		}
8583b2aab18SMatthew Ahrens 		owned = B_TRUE;
8593b2aab18SMatthew Ahrens 		if (zv != NULL)
8603b2aab18SMatthew Ahrens 			zv->zv_objset = os;
8613b2aab18SMatthew Ahrens 	} else {
8623b2aab18SMatthew Ahrens 		os = zv->zv_objset;
863c61ea566SGeorge Wilson 	}
864c61ea566SGeorge Wilson 
865c61ea566SGeorge Wilson 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
8663b2aab18SMatthew Ahrens 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
867c61ea566SGeorge Wilson 		goto out;
868c61ea566SGeorge Wilson 
8693b2aab18SMatthew Ahrens 	error = zvol_update_volsize(os, volsize);
870c61ea566SGeorge Wilson 
8713b2aab18SMatthew Ahrens 	if (error == 0 && zv != NULL)
8723b2aab18SMatthew Ahrens 		error = zvol_update_live_volsize(zv, volsize);
873bb0ade09Sahrens out:
8743b2aab18SMatthew Ahrens 	if (owned) {
8753b2aab18SMatthew Ahrens 		dmu_objset_disown(os, FTAG);
8763b2aab18SMatthew Ahrens 		if (zv != NULL)
8773b2aab18SMatthew Ahrens 			zv->zv_objset = NULL;
8783b2aab18SMatthew Ahrens 	}
879c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
880fa9e4066Sahrens 	return (error);
881fa9e4066Sahrens }
882fa9e4066Sahrens 
883fa9e4066Sahrens /*ARGSUSED*/
884fa9e4066Sahrens int
885fa9e4066Sahrens zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
886fa9e4066Sahrens {
887fa9e4066Sahrens 	zvol_state_t *zv;
888681d9761SEric Taylor 	int err = 0;
889fa9e4066Sahrens 
890c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
891fa9e4066Sahrens 
892c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
893fa9e4066Sahrens 	if (zv == NULL) {
894c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
895be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
896fa9e4066Sahrens 	}
897fa9e4066Sahrens 
898681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
899681d9761SEric Taylor 		err = zvol_first_open(zv);
900681d9761SEric Taylor 	if (err) {
901c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
902681d9761SEric Taylor 		return (err);
903681d9761SEric Taylor 	}
904681d9761SEric Taylor 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
905be6fd75aSMatthew Ahrens 		err = SET_ERROR(EROFS);
906681d9761SEric Taylor 		goto out;
907fa9e4066Sahrens 	}
908c7f714e2SEric Taylor 	if (zv->zv_flags & ZVOL_EXCL) {
909be6fd75aSMatthew Ahrens 		err = SET_ERROR(EBUSY);
910681d9761SEric Taylor 		goto out;
911c7f714e2SEric Taylor 	}
912c7f714e2SEric Taylor 	if (flag & FEXCL) {
913c7f714e2SEric Taylor 		if (zv->zv_total_opens != 0) {
914be6fd75aSMatthew Ahrens 			err = SET_ERROR(EBUSY);
915681d9761SEric Taylor 			goto out;
916c7f714e2SEric Taylor 		}
917c7f714e2SEric Taylor 		zv->zv_flags |= ZVOL_EXCL;
918c7f714e2SEric Taylor 	}
919fa9e4066Sahrens 
920fa9e4066Sahrens 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
921fa9e4066Sahrens 		zv->zv_open_count[otyp]++;
922fa9e4066Sahrens 		zv->zv_total_opens++;
923fa9e4066Sahrens 	}
924c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
925fa9e4066Sahrens 
926681d9761SEric Taylor 	return (err);
927681d9761SEric Taylor out:
928681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
929681d9761SEric Taylor 		zvol_last_close(zv);
930c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
931681d9761SEric Taylor 	return (err);
932fa9e4066Sahrens }
933fa9e4066Sahrens 
934fa9e4066Sahrens /*ARGSUSED*/
935fa9e4066Sahrens int
936fa9e4066Sahrens zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
937fa9e4066Sahrens {
938fa9e4066Sahrens 	minor_t minor = getminor(dev);
939fa9e4066Sahrens 	zvol_state_t *zv;
940681d9761SEric Taylor 	int error = 0;
941fa9e4066Sahrens 
942c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
943fa9e4066Sahrens 
944c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
945fa9e4066Sahrens 	if (zv == NULL) {
946c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
947be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
948fa9e4066Sahrens 	}
949fa9e4066Sahrens 
950c7f714e2SEric Taylor 	if (zv->zv_flags & ZVOL_EXCL) {
951c7f714e2SEric Taylor 		ASSERT(zv->zv_total_opens == 1);
952c7f714e2SEric Taylor 		zv->zv_flags &= ~ZVOL_EXCL;
953fa9e4066Sahrens 	}
954fa9e4066Sahrens 
955fa9e4066Sahrens 	/*
956fa9e4066Sahrens 	 * If the open count is zero, this is a spurious close.
957fa9e4066Sahrens 	 * That indicates a bug in the kernel / DDI framework.
958fa9e4066Sahrens 	 */
959fa9e4066Sahrens 	ASSERT(zv->zv_open_count[otyp] != 0);
960fa9e4066Sahrens 	ASSERT(zv->zv_total_opens != 0);
961fa9e4066Sahrens 
962fa9e4066Sahrens 	/*
963fa9e4066Sahrens 	 * You may get multiple opens, but only one close.
964fa9e4066Sahrens 	 */
965fa9e4066Sahrens 	zv->zv_open_count[otyp]--;
966fa9e4066Sahrens 	zv->zv_total_opens--;
967fa9e4066Sahrens 
968681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
969681d9761SEric Taylor 		zvol_last_close(zv);
970fa9e4066Sahrens 
971c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
972681d9761SEric Taylor 	return (error);
973fa9e4066Sahrens }
974fa9e4066Sahrens 
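/*
 * Completion callback for zvol_get_data(): release the dbuf hold (if any)
 * and the range lock acquired while the write was in flight.
 */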
975cab3a55eSPrakash Surya /* ARGSUSED */
976feb08c6bSbillm static void
977b24ab676SJeff Bonwick zvol_get_done(zgd_t *zgd, int error)
97867bd71c6Sperrin {
979b24ab676SJeff Bonwick 	if (zgd->zgd_db)
980b24ab676SJeff Bonwick 		dmu_buf_rele(zgd->zgd_db, zgd);
981b24ab676SJeff Bonwick 
98279315247SMatthew Ahrens 	rangelock_exit(zgd->zgd_lr);
983b24ab676SJeff Bonwick 
98467bd71c6Sperrin 	kmem_free(zgd, sizeof (zgd_t));
98567bd71c6Sperrin }
98667bd71c6Sperrin 
98767bd71c6Sperrin /*
98867bd71c6Sperrin  * Get data to generate a TX_WRITE intent log record.
98967bd71c6Sperrin  */
990feb08c6bSbillm static int
9911271e4b1SPrakash Surya zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
99267bd71c6Sperrin {
99367bd71c6Sperrin 	zvol_state_t *zv = arg;
994b24ab676SJeff Bonwick 	uint64_t offset = lr->lr_offset;
995b24ab676SJeff Bonwick 	uint64_t size = lr->lr_length;	/* length of user data */
99667bd71c6Sperrin 	dmu_buf_t *db;
99767bd71c6Sperrin 	zgd_t *zgd;
99867bd71c6Sperrin 	int error;
99967bd71c6Sperrin 
10001271e4b1SPrakash Surya 	ASSERT3P(lwb, !=, NULL);
10011271e4b1SPrakash Surya 	ASSERT3P(zio, !=, NULL);
10021271e4b1SPrakash Surya 	ASSERT3U(size, !=, 0);
1003b24ab676SJeff Bonwick 
1004b24ab676SJeff Bonwick 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
10051271e4b1SPrakash Surya 	zgd->zgd_lwb = lwb;
1006feb08c6bSbillm 
1007c2e6a7d6Sperrin 	/*
1008c2e6a7d6Sperrin 	 * Write records come in two flavors: immediate and indirect.
1009c2e6a7d6Sperrin 	 * For small writes it's cheaper to store the data with the
1010c2e6a7d6Sperrin 	 * log record (immediate); for large writes it's cheaper to
1011c2e6a7d6Sperrin 	 * sync the data and get a pointer to it (indirect) so that
1012c2e6a7d6Sperrin 	 * we don't have to write the data twice.
1013c2e6a7d6Sperrin 	 */
101442b14111SLOLi 	if (buf != NULL) { /* immediate write */
101579315247SMatthew Ahrens 		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
101642b14111SLOLi 		    RL_READER);
10178dfe5547SRichard Yao 		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
1018b24ab676SJeff Bonwick 		    DMU_READ_NO_PREFETCH);
101942b14111SLOLi 	} else { /* indirect write */
102042b14111SLOLi 		/*
102142b14111SLOLi 		 * Have to lock the whole block to ensure when it's written out
102242b14111SLOLi 		 * and its checksum is being calculated that no one can change
102342b14111SLOLi 		 * the data. Unlike zfs_get_data, we need not re-check
102442b14111SLOLi 		 * blocksize after we get the lock because it cannot be changed.
102542b14111SLOLi 		 */
1026b24ab676SJeff Bonwick 		size = zv->zv_volblocksize;
1027b24ab676SJeff Bonwick 		offset = P2ALIGN(offset, size);
102879315247SMatthew Ahrens 		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
102942b14111SLOLi 		    RL_READER);
10308dfe5547SRichard Yao 		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
103147cb52daSJeff Bonwick 		    DMU_READ_NO_PREFETCH);
1032b24ab676SJeff Bonwick 		if (error == 0) {
1033b7edcb94SMatthew Ahrens 			blkptr_t *bp = &lr->lr_blkptr;
103480901aeaSGeorge Wilson 
1035b24ab676SJeff Bonwick 			zgd->zgd_db = db;
1036b24ab676SJeff Bonwick 			zgd->zgd_bp = bp;
103767bd71c6Sperrin 
1038b24ab676SJeff Bonwick 			ASSERT(db->db_offset == offset);
1039b24ab676SJeff Bonwick 			ASSERT(db->db_size == size);
104067bd71c6Sperrin 
1041b24ab676SJeff Bonwick 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1042b24ab676SJeff Bonwick 			    zvol_get_done, zgd);
1043975c32a0SNeil Perrin 
1044b24ab676SJeff Bonwick 			if (error == 0)
1045b24ab676SJeff Bonwick 				return (0);
1046b24ab676SJeff Bonwick 		}
1047975c32a0SNeil Perrin 	}
1048975c32a0SNeil Perrin 
1049b24ab676SJeff Bonwick 	zvol_get_done(zgd, error);
1050b24ab676SJeff Bonwick 
105167bd71c6Sperrin 	return (error);
105267bd71c6Sperrin }
105367bd71c6Sperrin 
1054a24e15ceSperrin /*
1055a24e15ceSperrin  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
105622ac5be4Sperrin  *
105722ac5be4Sperrin  * We store data in the log buffers if it's small enough.
105867bd71c6Sperrin  * Otherwise we will later flush the data out via dmu_sync().
105922ac5be4Sperrin  */
106067bd71c6Sperrin ssize_t zvol_immediate_write_sz = 32768;
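/*
 * With the default 32K threshold, a full-block write to a volume whose
 * block size exceeds zvol_immediate_write_sz, on a pool without a separate
 * log device, is logged indirectly via dmu_sync(); other writes are stored
 * with the log record itself (see zvol_log_write() below).
 */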
106122ac5be4Sperrin 
1062feb08c6bSbillm static void
1063510b6c0eSNeil Perrin zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1064510b6c0eSNeil Perrin     boolean_t sync)
106522ac5be4Sperrin {
1066feb08c6bSbillm 	uint32_t blocksize = zv->zv_volblocksize;
10671209a471SNeil Perrin 	zilog_t *zilog = zv->zv_zilog;
1068c5ee4681SAlexander Motin 	itx_wr_state_t write_state;
1069510b6c0eSNeil Perrin 
1070b24ab676SJeff Bonwick 	if (zil_replaying(zilog, tx))
10711209a471SNeil Perrin 		return;
10721209a471SNeil Perrin 
1073c5ee4681SAlexander Motin 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1074c5ee4681SAlexander Motin 		write_state = WR_INDIRECT;
1075c5ee4681SAlexander Motin 	else if (!spa_has_slogs(zilog->zl_spa) &&
1076c5ee4681SAlexander Motin 	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1077c5ee4681SAlexander Motin 		write_state = WR_INDIRECT;
1078c5ee4681SAlexander Motin 	else if (sync)
1079c5ee4681SAlexander Motin 		write_state = WR_COPIED;
1080c5ee4681SAlexander Motin 	else
1081c5ee4681SAlexander Motin 		write_state = WR_NEED_COPY;
1082feb08c6bSbillm 
1083510b6c0eSNeil Perrin 	while (resid) {
1084510b6c0eSNeil Perrin 		itx_t *itx;
1085510b6c0eSNeil Perrin 		lr_write_t *lr;
1086c5ee4681SAlexander Motin 		itx_wr_state_t wr_state = write_state;
1087c5ee4681SAlexander Motin 		ssize_t len = resid;
1088c5ee4681SAlexander Motin 
1089c5ee4681SAlexander Motin 		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1090c5ee4681SAlexander Motin 			wr_state = WR_NEED_COPY;
1091c5ee4681SAlexander Motin 		else if (wr_state == WR_INDIRECT)
1092c5ee4681SAlexander Motin 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1093510b6c0eSNeil Perrin 
1094510b6c0eSNeil Perrin 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1095c5ee4681SAlexander Motin 		    (wr_state == WR_COPIED ? len : 0));
1096feb08c6bSbillm 		lr = (lr_write_t *)&itx->itx_lr;
10978dfe5547SRichard Yao 		if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
10988dfe5547SRichard Yao 		    off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1099b24ab676SJeff Bonwick 			zil_itx_destroy(itx);
1100510b6c0eSNeil Perrin 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1101510b6c0eSNeil Perrin 			lr = (lr_write_t *)&itx->itx_lr;
1102c5ee4681SAlexander Motin 			wr_state = WR_NEED_COPY;
1103510b6c0eSNeil Perrin 		}
1104510b6c0eSNeil Perrin 
1105c5ee4681SAlexander Motin 		itx->itx_wr_state = wr_state;
1106feb08c6bSbillm 		lr->lr_foid = ZVOL_OBJ;
1107feb08c6bSbillm 		lr->lr_offset = off;
1108510b6c0eSNeil Perrin 		lr->lr_length = len;
1109b24ab676SJeff Bonwick 		lr->lr_blkoff = 0;
1110feb08c6bSbillm 		BP_ZERO(&lr->lr_blkptr);
1111feb08c6bSbillm 
1112510b6c0eSNeil Perrin 		itx->itx_private = zv;
1113510b6c0eSNeil Perrin 		itx->itx_sync = sync;
1114510b6c0eSNeil Perrin 
11155002558fSNeil Perrin 		zil_itx_assign(zilog, itx, tx);
1116510b6c0eSNeil Perrin 
1117510b6c0eSNeil Perrin 		off += len;
1118510b6c0eSNeil Perrin 		resid -= len;
111922ac5be4Sperrin 	}
112022ac5be4Sperrin }
112122ac5be4Sperrin 
112288b7b0f2SMatthew Ahrens static int
1123810e43b2SBill Pijewski zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1124810e43b2SBill Pijewski     uint64_t size, boolean_t doread, boolean_t isdump)
1125e7cbe64fSgw {
1126e7cbe64fSgw 	vdev_disk_t *dvd;
1127e7cbe64fSgw 	int c;
1128e7cbe64fSgw 	int numerrors = 0;
1129e7cbe64fSgw 
1130810e43b2SBill Pijewski 	if (vd->vdev_ops == &vdev_mirror_ops ||
1131810e43b2SBill Pijewski 	    vd->vdev_ops == &vdev_replacing_ops ||
1132810e43b2SBill Pijewski 	    vd->vdev_ops == &vdev_spare_ops) {
1133810e43b2SBill Pijewski 		for (c = 0; c < vd->vdev_children; c++) {
1134810e43b2SBill Pijewski 			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1135810e43b2SBill Pijewski 			    addr, offset, origoffset, size, doread, isdump);
1136810e43b2SBill Pijewski 			if (err != 0) {
1137810e43b2SBill Pijewski 				numerrors++;
1138810e43b2SBill Pijewski 			} else if (doread) {
1139810e43b2SBill Pijewski 				break;
1140810e43b2SBill Pijewski 			}
1141e7cbe64fSgw 		}
1142e7cbe64fSgw 	}
1143e7cbe64fSgw 
1144810e43b2SBill Pijewski 	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1145e7cbe64fSgw 		return (numerrors < vd->vdev_children ? 0 : EIO);
1146e7cbe64fSgw 
1147dc0bb255SEric Taylor 	if (doread && !vdev_readable(vd))
1148be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1149dc0bb255SEric Taylor 	else if (!doread && !vdev_writeable(vd))
1150be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1151e7cbe64fSgw 
1152810e43b2SBill Pijewski 	if (vd->vdev_ops == &vdev_raidz_ops) {
1153810e43b2SBill Pijewski 		return (vdev_raidz_physio(vd,
1154810e43b2SBill Pijewski 		    addr, size, offset, origoffset, doread, isdump));
1155810e43b2SBill Pijewski 	}
1156810e43b2SBill Pijewski 
1157e7cbe64fSgw 	offset += VDEV_LABEL_START_SIZE;
1158e7cbe64fSgw 
1159e7cbe64fSgw 	if (ddi_in_panic() || isdump) {
116088b7b0f2SMatthew Ahrens 		ASSERT(!doread);
116188b7b0f2SMatthew Ahrens 		if (doread)
1162be6fd75aSMatthew Ahrens 			return (SET_ERROR(EIO));
1163810e43b2SBill Pijewski 		dvd = vd->vdev_tsd;
1164810e43b2SBill Pijewski 		ASSERT3P(dvd, !=, NULL);
1165e7cbe64fSgw 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1166e7cbe64fSgw 		    lbtodb(size)));
1167e7cbe64fSgw 	} else {
1168810e43b2SBill Pijewski 		dvd = vd->vdev_tsd;
1169810e43b2SBill Pijewski 		ASSERT3P(dvd, !=, NULL);
1170810e43b2SBill Pijewski 		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1171810e43b2SBill Pijewski 		    offset, doread ? B_READ : B_WRITE));
1172e7cbe64fSgw 	}
1173e7cbe64fSgw }
1174e7cbe64fSgw 
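/*
 * Perform I/O directly against the physical extents backing a dumpified
 * volume, bypassing the DMU; this is the path used by the dump subsystem
 * and while panicking.
 */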
117588b7b0f2SMatthew Ahrens static int
117688b7b0f2SMatthew Ahrens zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
117788b7b0f2SMatthew Ahrens     boolean_t doread, boolean_t isdump)
1178e7cbe64fSgw {
1179e7cbe64fSgw 	vdev_t *vd;
1180e7cbe64fSgw 	int error;
118188b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
1182e7cbe64fSgw 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1183e7cbe64fSgw 
118488b7b0f2SMatthew Ahrens 	/* Must be sector aligned, and not straddle a block boundary. */
118588b7b0f2SMatthew Ahrens 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
118688b7b0f2SMatthew Ahrens 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1187be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
118888b7b0f2SMatthew Ahrens 	}
118988b7b0f2SMatthew Ahrens 	ASSERT(size <= zv->zv_volblocksize);
1190e7cbe64fSgw 
119188b7b0f2SMatthew Ahrens 	/* Locate the extent this belongs to */
119288b7b0f2SMatthew Ahrens 	ze = list_head(&zv->zv_extents);
119388b7b0f2SMatthew Ahrens 	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
119488b7b0f2SMatthew Ahrens 		offset -= ze->ze_nblks * zv->zv_volblocksize;
119588b7b0f2SMatthew Ahrens 		ze = list_next(&zv->zv_extents, ze);
119688b7b0f2SMatthew Ahrens 	}
119724cc0e1cSGeorge Wilson 
11983b2aab18SMatthew Ahrens 	if (ze == NULL)
1199be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
12003b2aab18SMatthew Ahrens 
120124cc0e1cSGeorge Wilson 	if (!ddi_in_panic())
120224cc0e1cSGeorge Wilson 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
120324cc0e1cSGeorge Wilson 
120488b7b0f2SMatthew Ahrens 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
120588b7b0f2SMatthew Ahrens 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1206810e43b2SBill Pijewski 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1207810e43b2SBill Pijewski 	    size, doread, isdump);
120824cc0e1cSGeorge Wilson 
120924cc0e1cSGeorge Wilson 	if (!ddi_in_panic())
121024cc0e1cSGeorge Wilson 		spa_config_exit(spa, SCL_STATE, FTAG);
121124cc0e1cSGeorge Wilson 
1212e7cbe64fSgw 	return (error);
1213e7cbe64fSgw }
1214e7cbe64fSgw 
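/*
 * Strategy routine for the zvol pseudo-device: validate the request, take
 * the range lock for the affected region, and issue DMU reads/writes (or
 * direct dump I/O for a dumpified volume), logging writes to the ZIL as
 * needed.
 */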
1215fa9e4066Sahrens int
1216fa9e4066Sahrens zvol_strategy(buf_t *bp)
1217fa9e4066Sahrens {
1218c99e4bdcSChris Kirby 	zfs_soft_state_t *zs = NULL;
1219c99e4bdcSChris Kirby 	zvol_state_t *zv;
1220fa9e4066Sahrens 	uint64_t off, volsize;
122188b7b0f2SMatthew Ahrens 	size_t resid;
1222fa9e4066Sahrens 	char *addr;
122322ac5be4Sperrin 	objset_t *os;
1224fa9e4066Sahrens 	int error = 0;
122588b7b0f2SMatthew Ahrens 	boolean_t doread = bp->b_flags & B_READ;
1226810e43b2SBill Pijewski 	boolean_t is_dumpified;
1227510b6c0eSNeil Perrin 	boolean_t sync;
1228fa9e4066Sahrens 
1229c99e4bdcSChris Kirby 	if (getminor(bp->b_edev) == 0) {
1230be6fd75aSMatthew Ahrens 		error = SET_ERROR(EINVAL);
1231c99e4bdcSChris Kirby 	} else {
1232c99e4bdcSChris Kirby 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1233c99e4bdcSChris Kirby 		if (zs == NULL)
1234be6fd75aSMatthew Ahrens 			error = SET_ERROR(ENXIO);
1235c99e4bdcSChris Kirby 		else if (zs->zss_type != ZSST_ZVOL)
1236be6fd75aSMatthew Ahrens 			error = SET_ERROR(EINVAL);
1237fa9e4066Sahrens 	}
1238fa9e4066Sahrens 
1239c99e4bdcSChris Kirby 	if (error) {
1240c99e4bdcSChris Kirby 		bioerror(bp, error);
1241fa9e4066Sahrens 		biodone(bp);
1242fa9e4066Sahrens 		return (0);
1243fa9e4066Sahrens 	}
1244fa9e4066Sahrens 
1245c99e4bdcSChris Kirby 	zv = zs->zss_data;
1246c99e4bdcSChris Kirby 
1247681d9761SEric Taylor 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1248fa9e4066Sahrens 		bioerror(bp, EROFS);
1249fa9e4066Sahrens 		biodone(bp);
1250fa9e4066Sahrens 		return (0);
1251fa9e4066Sahrens 	}
1252fa9e4066Sahrens 
1253fa9e4066Sahrens 	off = ldbtob(bp->b_blkno);
1254fa9e4066Sahrens 	volsize = zv->zv_volsize;
1255fa9e4066Sahrens 
125622ac5be4Sperrin 	os = zv->zv_objset;
125722ac5be4Sperrin 	ASSERT(os != NULL);
1258fa9e4066Sahrens 
1259fa9e4066Sahrens 	bp_mapin(bp);
1260fa9e4066Sahrens 	addr = bp->b_un.b_addr;
1261fa9e4066Sahrens 	resid = bp->b_bcount;
1262fa9e4066Sahrens 
126388b7b0f2SMatthew Ahrens 	if (resid > 0 && (off < 0 || off >= volsize)) {
126488b7b0f2SMatthew Ahrens 		bioerror(bp, EIO);
126588b7b0f2SMatthew Ahrens 		biodone(bp);
126688b7b0f2SMatthew Ahrens 		return (0);
126788b7b0f2SMatthew Ahrens 	}
126873ec3d9cSgw 
1269810e43b2SBill Pijewski 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
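	/*
	 * A write must be committed to the ZIL before biodone() when the
	 * caller asked for synchronous completion (no B_ASYNC) and the
	 * write cache is disabled, or when the dataset uses sync=always.
	 * Reads and dumpified I/O never need an explicit commit here.
	 */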
127055da60b9SMark J Musante 	sync = ((!(bp->b_flags & B_ASYNC) &&
127155da60b9SMark J Musante 	    !(zv->zv_flags & ZVOL_WCE)) ||
127255da60b9SMark J Musante 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1273810e43b2SBill Pijewski 	    !doread && !is_dumpified;
1274510b6c0eSNeil Perrin 
1275*455e370cSJohn Levon 	ht_begin_unsafe();
1276*455e370cSJohn Levon 
1277a24e15ceSperrin 	/*
1278a24e15ceSperrin 	 * There must be no buffer changes when doing a dmu_sync() because
1279a24e15ceSperrin 	 * we can't change the data whilst calculating the checksum.
1280a24e15ceSperrin 	 */
128179315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock, off, resid,
128288b7b0f2SMatthew Ahrens 	    doread ? RL_READER : RL_WRITER);
1283fa9e4066Sahrens 
1284e7cbe64fSgw 	while (resid != 0 && off < volsize) {
128588b7b0f2SMatthew Ahrens 		size_t size = MIN(resid, zvol_maxphys);
1286810e43b2SBill Pijewski 		if (is_dumpified) {
1287e7cbe64fSgw 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
128888b7b0f2SMatthew Ahrens 			error = zvol_dumpio(zv, addr, off, size,
128988b7b0f2SMatthew Ahrens 			    doread, B_FALSE);
129088b7b0f2SMatthew Ahrens 		} else if (doread) {
12917bfdf011SNeil Perrin 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
12927bfdf011SNeil Perrin 			    DMU_READ_PREFETCH);
1293fa9e4066Sahrens 		} else {
129422ac5be4Sperrin 			dmu_tx_t *tx = dmu_tx_create(os);
1295fa9e4066Sahrens 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1296fa9e4066Sahrens 			error = dmu_tx_assign(tx, TXG_WAIT);
1297fa9e4066Sahrens 			if (error) {
1298fa9e4066Sahrens 				dmu_tx_abort(tx);
1299fa9e4066Sahrens 			} else {
130022ac5be4Sperrin 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1301510b6c0eSNeil Perrin 				zvol_log_write(zv, tx, off, size, sync);
1302fa9e4066Sahrens 				dmu_tx_commit(tx);
1303fa9e4066Sahrens 			}
1304fa9e4066Sahrens 		}
1305b87f3af3Sperrin 		if (error) {
1306b87f3af3Sperrin 			/* convert checksum errors into IO errors */
1307b87f3af3Sperrin 			if (error == ECKSUM)
1308be6fd75aSMatthew Ahrens 				error = SET_ERROR(EIO);
1309fa9e4066Sahrens 			break;
1310b87f3af3Sperrin 		}
1311fa9e4066Sahrens 		off += size;
1312fa9e4066Sahrens 		addr += size;
1313fa9e4066Sahrens 		resid -= size;
1314fa9e4066Sahrens 	}
131579315247SMatthew Ahrens 	rangelock_exit(lr);
1316fa9e4066Sahrens 
1317fa9e4066Sahrens 	if ((bp->b_resid = resid) == bp->b_bcount)
1318fa9e4066Sahrens 		bioerror(bp, off > volsize ? EINVAL : error);
1319fa9e4066Sahrens 
1320510b6c0eSNeil Perrin 	if (sync)
13215002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1322feb08c6bSbillm 	biodone(bp);
132322ac5be4Sperrin 
1324*455e370cSJohn Levon 	ht_end_unsafe();
1325*455e370cSJohn Levon 
1326fa9e4066Sahrens 	return (0);
1327fa9e4066Sahrens }
1328fa9e4066Sahrens 
132967bd71c6Sperrin /*
133067bd71c6Sperrin  * Set the buffer count to the zvol maximum transfer.
133167bd71c6Sperrin  * Using our own routine instead of the default minphys()
133267bd71c6Sperrin  * means that for larger writes we write bigger buffers on X86
133367bd71c6Sperrin  * (128K instead of 56K) and flush the disk write cache less often
133467bd71c6Sperrin  * (every zvol_maxphys, currently 1MB) rather than every minphys (currently
133567bd71c6Sperrin  * 56K on X86 and 128K on sparc).
133667bd71c6Sperrin  */
133767bd71c6Sperrin void
133867bd71c6Sperrin zvol_minphys(struct buf *bp)
133967bd71c6Sperrin {
134067bd71c6Sperrin 	if (bp->b_bcount > zvol_maxphys)
134167bd71c6Sperrin 		bp->b_bcount = zvol_maxphys;
134267bd71c6Sperrin }
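/*
 * Illustrative usage (a sketch mirroring the dumpified-zvol path in
 * zvol_read() and zvol_write() below): physio(9F) applies the minphys
 * routine to clamp each chunk of a transfer before passing it to the
 * strategy routine:
 *
 *	error = physio(zvol_strategy, NULL, dev, B_READ,
 *	    zvol_minphys, uio);
 */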
134367bd71c6Sperrin 
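/*
 * dump(9E) entry point: writes crash dump data to a dumpified zvol,
 * bypassing the DMU and going straight to the preallocated blocks
 * through zvol_dumpio().  May be called from panic context.
 */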
1344e7cbe64fSgw int
1345e7cbe64fSgw zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1346e7cbe64fSgw {
1347e7cbe64fSgw 	minor_t minor = getminor(dev);
1348e7cbe64fSgw 	zvol_state_t *zv;
1349e7cbe64fSgw 	int error = 0;
1350e7cbe64fSgw 	uint64_t size;
1351e7cbe64fSgw 	uint64_t boff;
1352e7cbe64fSgw 	uint64_t resid;
1353e7cbe64fSgw 
1354c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1355e7cbe64fSgw 	if (zv == NULL)
1356be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1357e7cbe64fSgw 
13583b2aab18SMatthew Ahrens 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1359be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
13603b2aab18SMatthew Ahrens 
1361e7cbe64fSgw 	boff = ldbtob(blkno);
1362e7cbe64fSgw 	resid = ldbtob(nblocks);
136388b7b0f2SMatthew Ahrens 
136488b7b0f2SMatthew Ahrens 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
136588b7b0f2SMatthew Ahrens 
1366e7cbe64fSgw 	while (resid) {
1367e7cbe64fSgw 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
136888b7b0f2SMatthew Ahrens 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1369e7cbe64fSgw 		if (error)
1370e7cbe64fSgw 			break;
1371e7cbe64fSgw 		boff += size;
1372e7cbe64fSgw 		addr += size;
1373e7cbe64fSgw 		resid -= size;
1374e7cbe64fSgw 	}
1375e7cbe64fSgw 
1376e7cbe64fSgw 	return (error);
1377e7cbe64fSgw }
1378e7cbe64fSgw 
1379fa9e4066Sahrens /*ARGSUSED*/
1380fa9e4066Sahrens int
1381feb08c6bSbillm zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1382fa9e4066Sahrens {
1383c7ca1008Sgw 	minor_t minor = getminor(dev);
1384c7ca1008Sgw 	zvol_state_t *zv;
138573ec3d9cSgw 	uint64_t volsize;
1386feb08c6bSbillm 	int error = 0;
1387fa9e4066Sahrens 
1388c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1389c7ca1008Sgw 	if (zv == NULL)
1390be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1391c7ca1008Sgw 
139273ec3d9cSgw 	volsize = zv->zv_volsize;
139373ec3d9cSgw 	if (uio->uio_resid > 0 &&
139473ec3d9cSgw 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1395be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
139673ec3d9cSgw 
139788b7b0f2SMatthew Ahrens 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
139888b7b0f2SMatthew Ahrens 		error = physio(zvol_strategy, NULL, dev, B_READ,
139988b7b0f2SMatthew Ahrens 		    zvol_minphys, uio);
140088b7b0f2SMatthew Ahrens 		return (error);
140188b7b0f2SMatthew Ahrens 	}
140288b7b0f2SMatthew Ahrens 
1403*455e370cSJohn Levon 	ht_begin_unsafe();
1404*455e370cSJohn Levon 
140579315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
140679315247SMatthew Ahrens 	    uio->uio_loffset, uio->uio_resid, RL_READER);
140773ec3d9cSgw 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1408feb08c6bSbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1409fa9e4066Sahrens 
141073ec3d9cSgw 		/* don't read past the end */
141173ec3d9cSgw 		if (bytes > volsize - uio->uio_loffset)
141273ec3d9cSgw 			bytes = volsize - uio->uio_loffset;
141373ec3d9cSgw 
1414feb08c6bSbillm 		error =  dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1415b87f3af3Sperrin 		if (error) {
1416b87f3af3Sperrin 			/* convert checksum errors into IO errors */
1417b87f3af3Sperrin 			if (error == ECKSUM)
1418be6fd75aSMatthew Ahrens 				error = SET_ERROR(EIO);
1419feb08c6bSbillm 			break;
1420b87f3af3Sperrin 		}
1421feb08c6bSbillm 	}
142279315247SMatthew Ahrens 	rangelock_exit(lr);
142379315247SMatthew Ahrens 
1424*455e370cSJohn Levon 	ht_end_unsafe();
1425*455e370cSJohn Levon 
1426feb08c6bSbillm 	return (error);
1427fa9e4066Sahrens }
1428fa9e4066Sahrens 
1429fa9e4066Sahrens /*ARGSUSED*/
1430fa9e4066Sahrens int
1431feb08c6bSbillm zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1432fa9e4066Sahrens {
1433c7ca1008Sgw 	minor_t minor = getminor(dev);
1434c7ca1008Sgw 	zvol_state_t *zv;
143573ec3d9cSgw 	uint64_t volsize;
1436feb08c6bSbillm 	int error = 0;
1437510b6c0eSNeil Perrin 	boolean_t sync;
1438feb08c6bSbillm 
1439c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1440c7ca1008Sgw 	if (zv == NULL)
1441be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1442c7ca1008Sgw 
144373ec3d9cSgw 	volsize = zv->zv_volsize;
144473ec3d9cSgw 	if (uio->uio_resid > 0 &&
144573ec3d9cSgw 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1446be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
144773ec3d9cSgw 
1448e7cbe64fSgw 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1449e7cbe64fSgw 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1450e7cbe64fSgw 		    zvol_minphys, uio);
1451e7cbe64fSgw 		return (error);
1452e7cbe64fSgw 	}
1453e7cbe64fSgw 
1454*455e370cSJohn Levon 	ht_begin_unsafe();
1455*455e370cSJohn Levon 
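	/*
	 * Writes through the character device are committed to the ZIL
	 * unless the write cache is enabled; sync=always forces a commit
	 * regardless.
	 */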
145655da60b9SMark J Musante 	sync = !(zv->zv_flags & ZVOL_WCE) ||
145755da60b9SMark J Musante 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1458510b6c0eSNeil Perrin 
145979315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
146079315247SMatthew Ahrens 	    uio->uio_loffset, uio->uio_resid, RL_WRITER);
146173ec3d9cSgw 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1462feb08c6bSbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1463feb08c6bSbillm 		uint64_t off = uio->uio_loffset;
1464feb08c6bSbillm 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
146573ec3d9cSgw 
146673ec3d9cSgw 		if (bytes > volsize - off)	/* don't write past the end */
146773ec3d9cSgw 			bytes = volsize - off;
146873ec3d9cSgw 
1469feb08c6bSbillm 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1470feb08c6bSbillm 		error = dmu_tx_assign(tx, TXG_WAIT);
1471feb08c6bSbillm 		if (error) {
1472feb08c6bSbillm 			dmu_tx_abort(tx);
1473feb08c6bSbillm 			break;
1474feb08c6bSbillm 		}
14758dfe5547SRichard Yao 		error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx);
1476feb08c6bSbillm 		if (error == 0)
1477510b6c0eSNeil Perrin 			zvol_log_write(zv, tx, off, bytes, sync);
1478feb08c6bSbillm 		dmu_tx_commit(tx);
1479feb08c6bSbillm 
1480feb08c6bSbillm 		if (error)
1481feb08c6bSbillm 			break;
1482feb08c6bSbillm 	}
148379315247SMatthew Ahrens 	rangelock_exit(lr);
148479315247SMatthew Ahrens 
1485510b6c0eSNeil Perrin 	if (sync)
14865002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1487*455e370cSJohn Levon 
1488*455e370cSJohn Levon 	ht_end_unsafe();
1489*455e370cSJohn Levon 
1490feb08c6bSbillm 	return (error);
1491fa9e4066Sahrens }
1492fa9e4066Sahrens 
1493c7f714e2SEric Taylor int
1494c7f714e2SEric Taylor zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1495c7f714e2SEric Taylor {
1496c7f714e2SEric Taylor 	struct uuid uuid = EFI_RESERVED;
1497c7f714e2SEric Taylor 	efi_gpe_t gpe = { 0 };
1498c7f714e2SEric Taylor 	uint32_t crc;
1499c7f714e2SEric Taylor 	dk_efi_t efi;
1500c7f714e2SEric Taylor 	int length;
1501c7f714e2SEric Taylor 	char *ptr;
1502c7f714e2SEric Taylor 
1503c7f714e2SEric Taylor 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1504be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFAULT));
1505c7f714e2SEric Taylor 	ptr = (char *)(uintptr_t)efi.dki_data_64;
1506c7f714e2SEric Taylor 	length = efi.dki_length;
1507c7f714e2SEric Taylor 	/*
1508c7f714e2SEric Taylor 	 * Some clients may attempt to request a PMBR for the
1509c7f714e2SEric Taylor 	 * zvol.  Currently this interface will return EINVAL to
1510c7f714e2SEric Taylor 	 * such requests.  These requests could be supported by
1511c7f714e2SEric Taylor 	 * adding a check for lba == 0 and consing up an appropriate
1512c7f714e2SEric Taylor 	 * PMBR.
1513c7f714e2SEric Taylor 	 */
1514c7f714e2SEric Taylor 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1515be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
1516c7f714e2SEric Taylor 
1517c7f714e2SEric Taylor 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1518c7f714e2SEric Taylor 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1519c7f714e2SEric Taylor 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1520c7f714e2SEric Taylor 
1521c7f714e2SEric Taylor 	if (efi.dki_lba == 1) {
1522c7f714e2SEric Taylor 		efi_gpt_t gpt = { 0 };
1523c7f714e2SEric Taylor 
1524c7f714e2SEric Taylor 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1525c7f714e2SEric Taylor 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1526fd797736SJohn Levon 		gpt.efi_gpt_HeaderSize = LE_32(EFI_HEADER_SIZE);
1527c7f714e2SEric Taylor 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1528c7f714e2SEric Taylor 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1529c7f714e2SEric Taylor 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1530c7f714e2SEric Taylor 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1531c7f714e2SEric Taylor 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1532c7f714e2SEric Taylor 		gpt.efi_gpt_SizeOfPartitionEntry =
1533c7f714e2SEric Taylor 		    LE_32(sizeof (efi_gpe_t));
1534c7f714e2SEric Taylor 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1535c7f714e2SEric Taylor 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1536fd797736SJohn Levon 		CRC32(crc, &gpt, EFI_HEADER_SIZE, -1U, crc32_table);
1537c7f714e2SEric Taylor 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1538c7f714e2SEric Taylor 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1539c7f714e2SEric Taylor 		    flag))
1540be6fd75aSMatthew Ahrens 			return (SET_ERROR(EFAULT));
1541c7f714e2SEric Taylor 		ptr += sizeof (gpt);
1542c7f714e2SEric Taylor 		length -= sizeof (gpt);
1543c7f714e2SEric Taylor 	}
1544c7f714e2SEric Taylor 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1545c7f714e2SEric Taylor 	    length), flag))
1546be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFAULT));
1547c7f714e2SEric Taylor 	return (0);
1548c7f714e2SEric Taylor }
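
/*
 * Illustrative consumer sketch (an assumption, not code from this gate):
 * with 'fd' an open descriptor on the zvol's rdsk node, a userland caller
 * would typically fetch this synthesized label via the DKIOCGETEFI ioctl
 * described in dkio(7I):
 *
 *	efi_gpt_t gpt;
 *	dk_efi_t efi = { 0 };
 *
 *	efi.dki_lba = 1;
 *	efi.dki_length = sizeof (gpt);
 *	efi.dki_data = &gpt;
 *	if (ioctl(fd, DKIOCGETEFI, &efi) != 0)
 *		return (errno);
 */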
1549c7f714e2SEric Taylor 
15503fb517f7SJames Moore /*
15513fb517f7SJames Moore  * BEGIN entry points to allow external callers access to the volume.
15523fb517f7SJames Moore  */
15533fb517f7SJames Moore /*
15543fb517f7SJames Moore  * Return the volume parameters needed for access from an external caller.
15553fb517f7SJames Moore  * These values are invariant as long as the volume is held open.
15563fb517f7SJames Moore  */
15573fb517f7SJames Moore int
15583fb517f7SJames Moore zvol_get_volume_params(minor_t minor, uint64_t *blksize,
15593fb517f7SJames Moore     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
15608dfe5547SRichard Yao     void **rl_hdl, void **dnode_hdl)
15613fb517f7SJames Moore {
15623fb517f7SJames Moore 	zvol_state_t *zv;
15633fb517f7SJames Moore 
1564c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1565c99e4bdcSChris Kirby 	if (zv == NULL)
1566be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
15673fb517f7SJames Moore 	if (zv->zv_flags & ZVOL_DUMPIFIED)
1568be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
15693fb517f7SJames Moore 
15703fb517f7SJames Moore 	ASSERT(blksize && max_xfer_len && minor_hdl &&
15718dfe5547SRichard Yao 	    objset_hdl && zil_hdl && rl_hdl && dnode_hdl);
15723fb517f7SJames Moore 
15733fb517f7SJames Moore 	*blksize = zv->zv_volblocksize;
15743fb517f7SJames Moore 	*max_xfer_len = (uint64_t)zvol_maxphys;
15753fb517f7SJames Moore 	*minor_hdl = zv;
15763fb517f7SJames Moore 	*objset_hdl = zv->zv_objset;
15773fb517f7SJames Moore 	*zil_hdl = zv->zv_zilog;
157879315247SMatthew Ahrens 	*rl_hdl = &zv->zv_rangelock;
15798dfe5547SRichard Yao 	*dnode_hdl = zv->zv_dn;
15803fb517f7SJames Moore 	return (0);
15813fb517f7SJames Moore }
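
/*
 * Minimal sketch (an assumption; the caller is presumed to already hold
 * the volume open and to know its minor number) of how an external
 * consumer might cache these handles:
 *
 *	uint64_t blksize, max_xfer, cur_size;
 *	void *minor_hdl, *os_hdl, *zil_hdl, *rl_hdl, *dn_hdl;
 *
 *	if (zvol_get_volume_params(minor, &blksize, &max_xfer, &minor_hdl,
 *	    &os_hdl, &zil_hdl, &rl_hdl, &dn_hdl) == 0)
 *		cur_size = zvol_get_volume_size(minor_hdl);
 */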
15823fb517f7SJames Moore 
15833fb517f7SJames Moore /*
15843fb517f7SJames Moore  * Return the current volume size to an external caller.
15853fb517f7SJames Moore  * The size can change while the volume is open.
15863fb517f7SJames Moore  */
15873fb517f7SJames Moore uint64_t
15883fb517f7SJames Moore zvol_get_volume_size(void *minor_hdl)
15893fb517f7SJames Moore {
15903fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
15913fb517f7SJames Moore 
15923fb517f7SJames Moore 	return (zv->zv_volsize);
15933fb517f7SJames Moore }
15943fb517f7SJames Moore 
15953fb517f7SJames Moore /*
15963fb517f7SJames Moore  * Return the current WCE setting to an external caller.
15973fb517f7SJames Moore  * The WCE setting can change while the volume is open.
15983fb517f7SJames Moore  */
15993fb517f7SJames Moore int
16003fb517f7SJames Moore zvol_get_volume_wce(void *minor_hdl)
16013fb517f7SJames Moore {
16023fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
16033fb517f7SJames Moore 
16043fb517f7SJames Moore 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
16053fb517f7SJames Moore }
16063fb517f7SJames Moore 
16073fb517f7SJames Moore /*
16083fb517f7SJames Moore  * Entry point for external callers to zvol_log_write
16093fb517f7SJames Moore  */
16103fb517f7SJames Moore void
16113fb517f7SJames Moore zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
16123fb517f7SJames Moore     boolean_t sync)
16133fb517f7SJames Moore {
16143fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
16153fb517f7SJames Moore 
16163fb517f7SJames Moore 	zvol_log_write(zv, tx, off, resid, sync);
16173fb517f7SJames Moore }
16183fb517f7SJames Moore /*
16193fb517f7SJames Moore  * END entry points to allow external callers access to the volume.
16203fb517f7SJames Moore  */
16213fb517f7SJames Moore 
1622b77b9231SDan McDonald /*
1623b77b9231SDan McDonald  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1624b77b9231SDan McDonald  */
1625b77b9231SDan McDonald static void
1626b77b9231SDan McDonald zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1627b77b9231SDan McDonald     boolean_t sync)
1628b77b9231SDan McDonald {
1629b77b9231SDan McDonald 	itx_t *itx;
1630b77b9231SDan McDonald 	lr_truncate_t *lr;
1631b77b9231SDan McDonald 	zilog_t *zilog = zv->zv_zilog;
1632b77b9231SDan McDonald 
1633b77b9231SDan McDonald 	if (zil_replaying(zilog, tx))
1634b77b9231SDan McDonald 		return;
1635b77b9231SDan McDonald 
1636b77b9231SDan McDonald 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1637b77b9231SDan McDonald 	lr = (lr_truncate_t *)&itx->itx_lr;
1638b77b9231SDan McDonald 	lr->lr_foid = ZVOL_OBJ;
1639b77b9231SDan McDonald 	lr->lr_offset = off;
1640b77b9231SDan McDonald 	lr->lr_length = len;
1641b77b9231SDan McDonald 
1642b77b9231SDan McDonald 	itx->itx_sync = sync;
1643b77b9231SDan McDonald 	zil_itx_assign(zilog, itx, tx);
1644b77b9231SDan McDonald }
1645b77b9231SDan McDonald 
1646fa9e4066Sahrens /*
1647fa9e4066Sahrens  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1648b77b9231SDan McDonald  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1649fa9e4066Sahrens  */
1650fa9e4066Sahrens /*ARGSUSED*/
1651fa9e4066Sahrens int
1652fa9e4066Sahrens zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1653fa9e4066Sahrens {
1654fa9e4066Sahrens 	zvol_state_t *zv;
1655af2c4821Smaybee 	struct dk_callback *dkc;
1656fa9e4066Sahrens 	int error = 0;
165779315247SMatthew Ahrens 	locked_range_t *lr;
1658fa9e4066Sahrens 
1659c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
1660fa9e4066Sahrens 
1661c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1662fa9e4066Sahrens 
1663fa9e4066Sahrens 	if (zv == NULL) {
1664c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1665be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1666fa9e4066Sahrens 	}
1667701f66c4SEric Taylor 	ASSERT(zv->zv_total_opens > 0);
1668fa9e4066Sahrens 
1669fa9e4066Sahrens 	switch (cmd) {
1670fa9e4066Sahrens 
1671fa9e4066Sahrens 	case DKIOCINFO:
1672a0b60564SGeorge Wilson 	{
1673a0b60564SGeorge Wilson 		struct dk_cinfo dki;
1674a0b60564SGeorge Wilson 
1675af2c4821Smaybee 		bzero(&dki, sizeof (dki));
1676af2c4821Smaybee 		(void) strcpy(dki.dki_cname, "zvol");
1677af2c4821Smaybee 		(void) strcpy(dki.dki_dname, "zvol");
1678af2c4821Smaybee 		dki.dki_ctype = DKC_UNKNOWN;
16793adc9019SEric Taylor 		dki.dki_unit = getminor(dev);
1680b5152584SMatthew Ahrens 		dki.dki_maxtransfer =
1681b5152584SMatthew Ahrens 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1682c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1683af2c4821Smaybee 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1684be6fd75aSMatthew Ahrens 			error = SET_ERROR(EFAULT);
1685fa9e4066Sahrens 		return (error);
1686a0b60564SGeorge Wilson 	}
1687fa9e4066Sahrens 
1688fa9e4066Sahrens 	case DKIOCGMEDIAINFO:
1689a0b60564SGeorge Wilson 	{
1690a0b60564SGeorge Wilson 		struct dk_minfo dkm;
1691a0b60564SGeorge Wilson 
1692fa9e4066Sahrens 		bzero(&dkm, sizeof (dkm));
1693fa9e4066Sahrens 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1694fa9e4066Sahrens 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1695fa9e4066Sahrens 		dkm.dki_media_type = DK_UNKNOWN;
1696c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1697fa9e4066Sahrens 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1698be6fd75aSMatthew Ahrens 			error = SET_ERROR(EFAULT);
1699fa9e4066Sahrens 		return (error);
1700a0b60564SGeorge Wilson 	}
1701a0b60564SGeorge Wilson 
1702a0b60564SGeorge Wilson 	case DKIOCGMEDIAINFOEXT:
1703a0b60564SGeorge Wilson 	{
1704a0b60564SGeorge Wilson 		struct dk_minfo_ext dkmext;
1705a0b60564SGeorge Wilson 
1706a0b60564SGeorge Wilson 		bzero(&dkmext, sizeof (dkmext));
1707a0b60564SGeorge Wilson 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1708a0b60564SGeorge Wilson 		dkmext.dki_pbsize = zv->zv_volblocksize;
1709a0b60564SGeorge Wilson 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1710a0b60564SGeorge Wilson 		dkmext.dki_media_type = DK_UNKNOWN;
1711a0b60564SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1712a0b60564SGeorge Wilson 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1713a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1714a0b60564SGeorge Wilson 		return (error);
1715a0b60564SGeorge Wilson 	}
1716fa9e4066Sahrens 
1717fa9e4066Sahrens 	case DKIOCGETEFI:
1718a0b60564SGeorge Wilson 	{
1719a0b60564SGeorge Wilson 		uint64_t vs = zv->zv_volsize;
1720a0b60564SGeorge Wilson 		uint8_t bs = zv->zv_min_bs;
1721fa9e4066Sahrens 
1722a0b60564SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1723a0b60564SGeorge Wilson 		error = zvol_getefi((void *)arg, flag, vs, bs);
1724a0b60564SGeorge Wilson 		return (error);
1725a0b60564SGeorge Wilson 	}
1726fa9e4066Sahrens 
1727feb08c6bSbillm 	case DKIOCFLUSHWRITECACHE:
1728af2c4821Smaybee 		dkc = (struct dk_callback *)arg;
1729c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1730*455e370cSJohn Levon 
1731*455e370cSJohn Levon 		ht_begin_unsafe();
1732*455e370cSJohn Levon 
17335002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1734af2c4821Smaybee 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1735af2c4821Smaybee 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1736af2c4821Smaybee 			error = 0;
1737af2c4821Smaybee 		}
1738*455e370cSJohn Levon 
1739*455e370cSJohn Levon 		ht_end_unsafe();
1740*455e370cSJohn Levon 
1741701f66c4SEric Taylor 		return (error);
1742701f66c4SEric Taylor 
1743701f66c4SEric Taylor 	case DKIOCGETWCE:
1744a0b60564SGeorge Wilson 	{
1745a0b60564SGeorge Wilson 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1746a0b60564SGeorge Wilson 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1747a0b60564SGeorge Wilson 		    flag))
1748a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1749a0b60564SGeorge Wilson 		break;
1750a0b60564SGeorge Wilson 	}
1751a0b60564SGeorge Wilson 	case DKIOCSETWCE:
1752a0b60564SGeorge Wilson 	{
1753a0b60564SGeorge Wilson 		int wce;
1754a0b60564SGeorge Wilson 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1755a0b60564SGeorge Wilson 		    flag)) {
1756a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1757701f66c4SEric Taylor 			break;
1758701f66c4SEric Taylor 		}
1759a0b60564SGeorge Wilson 		if (wce) {
1760a0b60564SGeorge Wilson 			zv->zv_flags |= ZVOL_WCE;
1761a0b60564SGeorge Wilson 			mutex_exit(&zfsdev_state_lock);
1762a0b60564SGeorge Wilson 		} else {
1763a0b60564SGeorge Wilson 			zv->zv_flags &= ~ZVOL_WCE;
1764a0b60564SGeorge Wilson 			mutex_exit(&zfsdev_state_lock);
1765*455e370cSJohn Levon 			ht_begin_unsafe();
1766a0b60564SGeorge Wilson 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1767*455e370cSJohn Levon 			ht_end_unsafe();
1768701f66c4SEric Taylor 		}
1769a0b60564SGeorge Wilson 		return (0);
1770a0b60564SGeorge Wilson 	}
1771feb08c6bSbillm 
1772b6130eadSmaybee 	case DKIOCGGEOM:
1773b6130eadSmaybee 	case DKIOCGVTOC:
1774e7cbe64fSgw 		/*
1775e7cbe64fSgw 		 * commands using these (like prtvtoc) expect ENOTSUP
1776e7cbe64fSgw 		 * since we're emulating an EFI label
1777e7cbe64fSgw 		 */
1778be6fd75aSMatthew Ahrens 		error = SET_ERROR(ENOTSUP);
1779b6130eadSmaybee 		break;
1780b6130eadSmaybee 
1781e7cbe64fSgw 	case DKIOCDUMPINIT:
178279315247SMatthew Ahrens 		lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1783e7cbe64fSgw 		    RL_WRITER);
1784e7cbe64fSgw 		error = zvol_dumpify(zv);
178579315247SMatthew Ahrens 		rangelock_exit(lr);
1786e7cbe64fSgw 		break;
1787e7cbe64fSgw 
1788e7cbe64fSgw 	case DKIOCDUMPFINI:
178906d5ae10SEric Taylor 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
179006d5ae10SEric Taylor 			break;
179179315247SMatthew Ahrens 		lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1792e7cbe64fSgw 		    RL_WRITER);
1793e7cbe64fSgw 		error = zvol_dump_fini(zv);
179479315247SMatthew Ahrens 		rangelock_exit(lr);
1795e7cbe64fSgw 		break;
1796e7cbe64fSgw 
1797b77b9231SDan McDonald 	case DKIOCFREE:
1798b77b9231SDan McDonald 	{
1799047c81d3SSaso Kiselkov 		dkioc_free_list_t *dfl;
1800b77b9231SDan McDonald 		dmu_tx_t *tx;
1801b77b9231SDan McDonald 
1802893c83baSGeorge Wilson 		if (!zvol_unmap_enabled)
1803893c83baSGeorge Wilson 			break;
1804893c83baSGeorge Wilson 
1805047c81d3SSaso Kiselkov 		if (!(flag & FKIOCTL)) {
1806047c81d3SSaso Kiselkov 			error = dfl_copyin((void *)arg, &dfl, flag, KM_SLEEP);
1807047c81d3SSaso Kiselkov 			if (error != 0)
1808047c81d3SSaso Kiselkov 				break;
1809047c81d3SSaso Kiselkov 		} else {
1810047c81d3SSaso Kiselkov 			dfl = (dkioc_free_list_t *)arg;
1811047c81d3SSaso Kiselkov 			ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS);
1812047c81d3SSaso Kiselkov 			if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) {
1813047c81d3SSaso Kiselkov 				error = SET_ERROR(EINVAL);
1814047c81d3SSaso Kiselkov 				break;
1815047c81d3SSaso Kiselkov 			}
1816b77b9231SDan McDonald 		}
1817b77b9231SDan McDonald 
1818574e2414SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1819b77b9231SDan McDonald 
1820*455e370cSJohn Levon 		ht_begin_unsafe();
1821*455e370cSJohn Levon 
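		/*
		 * Free each requested extent under the range lock; each free
		 * is logged to the ZIL as a TX_TRUNCATE record before the
		 * blocks are released with dmu_free_long_range().
		 */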
1822047c81d3SSaso Kiselkov 		for (int i = 0; i < dfl->dfl_num_exts; i++) {
1823047c81d3SSaso Kiselkov 			uint64_t start = dfl->dfl_exts[i].dfle_start,
1824047c81d3SSaso Kiselkov 			    length = dfl->dfl_exts[i].dfle_length,
1825047c81d3SSaso Kiselkov 			    end = start + length;
1826047c81d3SSaso Kiselkov 
1827047c81d3SSaso Kiselkov 			/*
1828047c81d3SSaso Kiselkov 			 * Apply Postel's Law to length-checking.  If they
1829047c81d3SSaso Kiselkov 			 * overshoot, just blank out until the end, if there's
1830047c81d3SSaso Kiselkov 			 * a need to blank out anything.
1831047c81d3SSaso Kiselkov 			 */
1832047c81d3SSaso Kiselkov 			if (start >= zv->zv_volsize)
1833047c81d3SSaso Kiselkov 				continue;	/* No need to do anything... */
1834047c81d3SSaso Kiselkov 			if (end > zv->zv_volsize) {
1835047c81d3SSaso Kiselkov 				end = DMU_OBJECT_END;
1836047c81d3SSaso Kiselkov 				length = end - start;
1837047c81d3SSaso Kiselkov 			}
1838b77b9231SDan McDonald 
183979315247SMatthew Ahrens 			lr = rangelock_enter(&zv->zv_rangelock, start, length,
1840047c81d3SSaso Kiselkov 			    RL_WRITER);
1841047c81d3SSaso Kiselkov 			tx = dmu_tx_create(zv->zv_objset);
1842047c81d3SSaso Kiselkov 			error = dmu_tx_assign(tx, TXG_WAIT);
1843047c81d3SSaso Kiselkov 			if (error != 0) {
1844047c81d3SSaso Kiselkov 				dmu_tx_abort(tx);
1845047c81d3SSaso Kiselkov 			} else {
1846047c81d3SSaso Kiselkov 				zvol_log_truncate(zv, tx, start, length,
1847047c81d3SSaso Kiselkov 				    B_TRUE);
1848047c81d3SSaso Kiselkov 				dmu_tx_commit(tx);
1849047c81d3SSaso Kiselkov 				error = dmu_free_long_range(zv->zv_objset,
1850047c81d3SSaso Kiselkov 				    ZVOL_OBJ, start, length);
1851047c81d3SSaso Kiselkov 			}
1852047c81d3SSaso Kiselkov 
185379315247SMatthew Ahrens 			rangelock_exit(lr);
1854047c81d3SSaso Kiselkov 
1855047c81d3SSaso Kiselkov 			if (error != 0)
1856047c81d3SSaso Kiselkov 				break;
1857047c81d3SSaso Kiselkov 		}
1858b77b9231SDan McDonald 
18591c9272b8SStephen Blinick 		/*
18601c9272b8SStephen Blinick 		 * If the write cache is disabled, if the 'sync' property is
18611c9272b8SStephen Blinick 		 * set to 'always', or if the caller asked for a synchronous
18621c9272b8SStephen Blinick 		 * free, commit this operation to the ZIL.
18631c9272b8SStephen Blinick 		 * This will sync any previous uncommitted writes to the
18641c9272b8SStephen Blinick 		 * zvol object.
18651c9272b8SStephen Blinick 		 * Can be overridden by the zvol_unmap_sync_enabled tunable.
18661c9272b8SStephen Blinick 		 */
18671c9272b8SStephen Blinick 		if ((error == 0) && zvol_unmap_sync_enabled &&
18681c9272b8SStephen Blinick 		    (!(zv->zv_flags & ZVOL_WCE) ||
18691c9272b8SStephen Blinick 		    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
1870047c81d3SSaso Kiselkov 		    (dfl->dfl_flags & DF_WAIT_SYNC))) {
18711c9272b8SStephen Blinick 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1872b77b9231SDan McDonald 		}
18731c9272b8SStephen Blinick 
1874047c81d3SSaso Kiselkov 		if (!(flag & FKIOCTL))
1875047c81d3SSaso Kiselkov 			dfl_free(dfl);
1876047c81d3SSaso Kiselkov 
1877*455e370cSJohn Levon 		ht_end_unsafe();
1878*455e370cSJohn Levon 
1879574e2414SGeorge Wilson 		return (error);
1880b77b9231SDan McDonald 	}
1881b77b9231SDan McDonald 
1882fa9e4066Sahrens 	default:
1883be6fd75aSMatthew Ahrens 		error = SET_ERROR(ENOTTY);
1884fa9e4066Sahrens 		break;
1885fa9e4066Sahrens 
1886fa9e4066Sahrens 	}
1887c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
1888fa9e4066Sahrens 	return (error);
1889fa9e4066Sahrens }
1890fa9e4066Sahrens 
1891fa9e4066Sahrens int
1892fa9e4066Sahrens zvol_busy(void)
1893fa9e4066Sahrens {
1894fa9e4066Sahrens 	return (zvol_minors != 0);
1895fa9e4066Sahrens }
1896fa9e4066Sahrens 
1897fa9e4066Sahrens void
1898fa9e4066Sahrens zvol_init(void)
1899fa9e4066Sahrens {
1900c99e4bdcSChris Kirby 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1901c99e4bdcSChris Kirby 	    1) == 0);
1902c99e4bdcSChris Kirby 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1903fa9e4066Sahrens }
1904fa9e4066Sahrens 
1905fa9e4066Sahrens void
1906fa9e4066Sahrens zvol_fini(void)
1907fa9e4066Sahrens {
1908c99e4bdcSChris Kirby 	mutex_destroy(&zfsdev_state_lock);
1909c99e4bdcSChris Kirby 	ddi_soft_state_fini(&zfsdev_state);
1910fa9e4066Sahrens }
1911e7cbe64fSgw 
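/*
 * Check/sync callbacks for the dsl_sync_task() issued by zvol_dump_init()
 * to activate the MULTI_VDEV_CRASH_DUMP feature when a dump device is
 * created on a pool with more than one child vdev (or a raidz top-level
 * vdev).
 */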
1912810e43b2SBill Pijewski /*ARGSUSED*/
1913810e43b2SBill Pijewski static int
1914810e43b2SBill Pijewski zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1915810e43b2SBill Pijewski {
1916810e43b2SBill Pijewski 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1917810e43b2SBill Pijewski 
19182acef22dSMatthew Ahrens 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1919810e43b2SBill Pijewski 		return (1);
1920810e43b2SBill Pijewski 	return (0);
1921810e43b2SBill Pijewski }
1922810e43b2SBill Pijewski 
1923810e43b2SBill Pijewski /*ARGSUSED*/
1924810e43b2SBill Pijewski static void
1925810e43b2SBill Pijewski zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1926810e43b2SBill Pijewski {
1927810e43b2SBill Pijewski 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1928810e43b2SBill Pijewski 
19292acef22dSMatthew Ahrens 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1930810e43b2SBill Pijewski }
1931810e43b2SBill Pijewski 
1932e7cbe64fSgw static int
1933e7cbe64fSgw zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1934e7cbe64fSgw {
1935e7cbe64fSgw 	dmu_tx_t *tx;
1936810e43b2SBill Pijewski 	int error;
1937e7cbe64fSgw 	objset_t *os = zv->zv_objset;
1938810e43b2SBill Pijewski 	spa_t *spa = dmu_objset_spa(os);
1939810e43b2SBill Pijewski 	vdev_t *vd = spa->spa_root_vdev;
1940e7cbe64fSgw 	nvlist_t *nv = NULL;
1941810e43b2SBill Pijewski 	uint64_t version = spa_version(spa);
1942b10bba72SGeorge Wilson 	uint64_t checksum, compress, refresrv, vbs, dedup;
1943e7cbe64fSgw 
1944c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1945810e43b2SBill Pijewski 	ASSERT(vd->vdev_ops == &vdev_root_ops);
1946810e43b2SBill Pijewski 
1947681d9761SEric Taylor 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1948681d9761SEric Taylor 	    DMU_OBJECT_END);
1949b10bba72SGeorge Wilson 	if (error != 0)
1950b10bba72SGeorge Wilson 		return (error);
1951681d9761SEric Taylor 	/* wait for dmu_free_long_range to actually free the blocks */
1952681d9761SEric Taylor 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1953e7cbe64fSgw 
1954810e43b2SBill Pijewski 	/*
1955810e43b2SBill Pijewski 	 * If the pool on which the dump device is being initialized has more
1956810e43b2SBill Pijewski 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1957810e43b2SBill Pijewski 	 * enabled.  If so, bump that feature's counter to indicate that the
1958810e43b2SBill Pijewski 	 * feature is active. We also check the vdev type to handle the
1959810e43b2SBill Pijewski 	 * following case:
1960810e43b2SBill Pijewski 	 *   # zpool create test raidz disk1 disk2 disk3
1961810e43b2SBill Pijewski 	 *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
1962810e43b2SBill Pijewski 	 *   the raidz vdev itself has 3 children.
1963810e43b2SBill Pijewski 	 */
1964810e43b2SBill Pijewski 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1965810e43b2SBill Pijewski 		if (!spa_feature_is_enabled(spa,
19662acef22dSMatthew Ahrens 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1967810e43b2SBill Pijewski 			return (SET_ERROR(ENOTSUP));
1968810e43b2SBill Pijewski 		(void) dsl_sync_task(spa_name(spa),
1969810e43b2SBill Pijewski 		    zfs_mvdev_dump_feature_check,
19707d46dc6cSMatthew Ahrens 		    zfs_mvdev_dump_activate_feature_sync, NULL,
19717d46dc6cSMatthew Ahrens 		    2, ZFS_SPACE_CHECK_RESERVED);
1972810e43b2SBill Pijewski 	}
1973810e43b2SBill Pijewski 
1974b10bba72SGeorge Wilson 	if (!resize) {
1975b10bba72SGeorge Wilson 		error = dsl_prop_get_integer(zv->zv_name,
1976b10bba72SGeorge Wilson 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1977b10bba72SGeorge Wilson 		if (error == 0) {
1978b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1979b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1980b10bba72SGeorge Wilson 			    NULL);
1981b10bba72SGeorge Wilson 		}
1982b10bba72SGeorge Wilson 		if (error == 0) {
1983b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1984b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
1985b10bba72SGeorge Wilson 			    &refresrv, NULL);
1986b10bba72SGeorge Wilson 		}
1987b10bba72SGeorge Wilson 		if (error == 0) {
1988b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1989b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
1990b10bba72SGeorge Wilson 			    NULL);
1991b10bba72SGeorge Wilson 		}
1992b10bba72SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP && error == 0) {
1993b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1994b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1995b10bba72SGeorge Wilson 		}
1996b10bba72SGeorge Wilson 	}
1997b10bba72SGeorge Wilson 	if (error != 0)
1998b10bba72SGeorge Wilson 		return (error);
1999b10bba72SGeorge Wilson 
2000e7cbe64fSgw 	tx = dmu_tx_create(os);
2001e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2002681d9761SEric Taylor 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2003e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2004b10bba72SGeorge Wilson 	if (error != 0) {
2005e7cbe64fSgw 		dmu_tx_abort(tx);
2006e7cbe64fSgw 		return (error);
2007e7cbe64fSgw 	}
2008e7cbe64fSgw 
2009e7cbe64fSgw 	/*
2010e7cbe64fSgw 	 * If we are resizing the dump device then we only need to
2011e7cbe64fSgw 	 * update the refreservation to match the newly updated
2012e7cbe64fSgw 	 * zvolsize. Otherwise, we save off the original state of the
2013e7cbe64fSgw 	 * zvol so that we can restore it if the zvol is ever undumpified.
2014e7cbe64fSgw 	 */
2015e7cbe64fSgw 	if (resize) {
2016e7cbe64fSgw 		error = zap_update(os, ZVOL_ZAP_OBJ,
2017e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2018e7cbe64fSgw 		    &zv->zv_volsize, tx);
2019e7cbe64fSgw 	} else {
2020b10bba72SGeorge Wilson 		error = zap_update(os, ZVOL_ZAP_OBJ,
2021e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2022e7cbe64fSgw 		    &compress, tx);
2023b10bba72SGeorge Wilson 		if (error == 0) {
2024b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2025b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2026b10bba72SGeorge Wilson 			    &checksum, tx);
2027b10bba72SGeorge Wilson 		}
2028b10bba72SGeorge Wilson 		if (error == 0) {
2029b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2030b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2031b10bba72SGeorge Wilson 			    &refresrv, tx);
2032b10bba72SGeorge Wilson 		}
2033b10bba72SGeorge Wilson 		if (error == 0) {
2034b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2035b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2036b10bba72SGeorge Wilson 			    &vbs, tx);
2037b10bba72SGeorge Wilson 		}
2038b10bba72SGeorge Wilson 		if (error == 0) {
2039b10bba72SGeorge Wilson 			error = dmu_object_set_blocksize(
2040b10bba72SGeorge Wilson 			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2041b10bba72SGeorge Wilson 		}
2042b10bba72SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP && error == 0) {
2043b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
20448d265e66SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
20458d265e66SGeorge Wilson 			    &dedup, tx);
20468d265e66SGeorge Wilson 		}
2047681d9761SEric Taylor 		if (error == 0)
2048b5152584SMatthew Ahrens 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2049e7cbe64fSgw 	}
2050e7cbe64fSgw 	dmu_tx_commit(tx);
2051e7cbe64fSgw 
2052e7cbe64fSgw 	/*
2053e7cbe64fSgw 	 * We only need to update the zvol's properties if we are initializing
2054e7cbe64fSgw 	 * the dump area for the first time.
2055e7cbe64fSgw 	 */
2056b10bba72SGeorge Wilson 	if (error == 0 && !resize) {
2057b10bba72SGeorge Wilson 		/*
2058b10bba72SGeorge Wilson 		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2059b10bba72SGeorge Wilson 		 * function.  Otherwise, use the old default -- OFF.
2060b10bba72SGeorge Wilson 		 */
2061b10bba72SGeorge Wilson 		checksum = spa_feature_is_active(spa,
2062b10bba72SGeorge Wilson 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2063b10bba72SGeorge Wilson 		    ZIO_CHECKSUM_OFF;
2064b10bba72SGeorge Wilson 
2065e7cbe64fSgw 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2066e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2067e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2068e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2069e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2070e7cbe64fSgw 		    ZIO_COMPRESS_OFF) == 0);
2071e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2072e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2073810e43b2SBill Pijewski 		    checksum) == 0);
20748d265e66SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP) {
20758d265e66SGeorge Wilson 			VERIFY(nvlist_add_uint64(nv,
20768d265e66SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
20778d265e66SGeorge Wilson 			    ZIO_CHECKSUM_OFF) == 0);
20788d265e66SGeorge Wilson 		}
2079e7cbe64fSgw 
208092241e0bSTom Erickson 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
208192241e0bSTom Erickson 		    nv, NULL);
2082e7cbe64fSgw 		nvlist_free(nv);
2083e7cbe64fSgw 	}
2084e7cbe64fSgw 
2085e7cbe64fSgw 	/* Allocate the space for the dump */
2086b10bba72SGeorge Wilson 	if (error == 0)
2087b10bba72SGeorge Wilson 		error = zvol_prealloc(zv);
2088e7cbe64fSgw 	return (error);
2089e7cbe64fSgw }
2090e7cbe64fSgw 
2091e7cbe64fSgw static int
2092e7cbe64fSgw zvol_dumpify(zvol_state_t *zv)
2093e7cbe64fSgw {
2094e7cbe64fSgw 	int error = 0;
2095e7cbe64fSgw 	uint64_t dumpsize = 0;
2096e7cbe64fSgw 	dmu_tx_t *tx;
2097e7cbe64fSgw 	objset_t *os = zv->zv_objset;
2098e7cbe64fSgw 
2099681d9761SEric Taylor 	if (zv->zv_flags & ZVOL_RDONLY)
2100be6fd75aSMatthew Ahrens 		return (SET_ERROR(EROFS));
2101e7cbe64fSgw 
2102e7cbe64fSgw 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2103e7cbe64fSgw 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
21044445fffbSMatthew Ahrens 		boolean_t resize = (dumpsize > 0);
2105e7cbe64fSgw 
2106e7cbe64fSgw 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2107e7cbe64fSgw 			(void) zvol_dump_fini(zv);
2108e7cbe64fSgw 			return (error);
2109e7cbe64fSgw 		}
2110e7cbe64fSgw 	}
2111e7cbe64fSgw 
2112e7cbe64fSgw 	/*
2113e7cbe64fSgw 	 * Build up our lba mapping.
2114e7cbe64fSgw 	 */
2115e7cbe64fSgw 	error = zvol_get_lbas(zv);
2116e7cbe64fSgw 	if (error) {
2117e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2118e7cbe64fSgw 		return (error);
2119e7cbe64fSgw 	}
2120e7cbe64fSgw 
2121e7cbe64fSgw 	tx = dmu_tx_create(os);
2122e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2123e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2124e7cbe64fSgw 	if (error) {
2125e7cbe64fSgw 		dmu_tx_abort(tx);
2126e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2127e7cbe64fSgw 		return (error);
2128e7cbe64fSgw 	}
2129e7cbe64fSgw 
2130e7cbe64fSgw 	zv->zv_flags |= ZVOL_DUMPIFIED;
2131e7cbe64fSgw 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2132e7cbe64fSgw 	    &zv->zv_volsize, tx);
2133e7cbe64fSgw 	dmu_tx_commit(tx);
2134e7cbe64fSgw 
2135e7cbe64fSgw 	if (error) {
2136e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2137e7cbe64fSgw 		return (error);
2138e7cbe64fSgw 	}
2139e7cbe64fSgw 
2140e7cbe64fSgw 	txg_wait_synced(dmu_objset_pool(os), 0);
2141e7cbe64fSgw 	return (0);
2142e7cbe64fSgw }
2143e7cbe64fSgw 
2144e7cbe64fSgw static int
2145e7cbe64fSgw zvol_dump_fini(zvol_state_t *zv)
2146e7cbe64fSgw {
2147e7cbe64fSgw 	dmu_tx_t *tx;
2148e7cbe64fSgw 	objset_t *os = zv->zv_objset;
2149e7cbe64fSgw 	nvlist_t *nv;
2150e7cbe64fSgw 	int error = 0;
2151afee20e4SGeorge Wilson 	uint64_t checksum, compress, refresrv, vbs, dedup;
21528d265e66SGeorge Wilson 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2153e7cbe64fSgw 
2154b7e50089Smaybee 	/*
2155b7e50089Smaybee 	 * Attempt to restore the zvol back to its pre-dumpified state.
2156b7e50089Smaybee 	 * This is a best-effort attempt as it's possible that not all
2157b7e50089Smaybee 	 * of these properties were initialized during the dumpify process
2158b7e50089Smaybee 	 * (e.g. an error during zvol_dump_init).
2159b7e50089Smaybee 	 */
2160b7e50089Smaybee 
2161e7cbe64fSgw 	tx = dmu_tx_create(os);
2162e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2163e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2164e7cbe64fSgw 	if (error) {
2165e7cbe64fSgw 		dmu_tx_abort(tx);
2166e7cbe64fSgw 		return (error);
2167e7cbe64fSgw 	}
2168b7e50089Smaybee 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2169b7e50089Smaybee 	dmu_tx_commit(tx);
2170e7cbe64fSgw 
2171e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2172e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2173e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2174e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2175e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2176e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
217788b7b0f2SMatthew Ahrens 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
217888b7b0f2SMatthew Ahrens 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2179e7cbe64fSgw 
2180e7cbe64fSgw 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2181e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2182e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2183e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2184e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2185e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2186e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
21878d265e66SGeorge Wilson 	if (version >= SPA_VERSION_DEDUP &&
21888d265e66SGeorge Wilson 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
21898d265e66SGeorge Wilson 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
21908d265e66SGeorge Wilson 		(void) nvlist_add_uint64(nv,
21918d265e66SGeorge Wilson 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
21928d265e66SGeorge Wilson 	}
219392241e0bSTom Erickson 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
219492241e0bSTom Erickson 	    nv, NULL);
2195e7cbe64fSgw 	nvlist_free(nv);
2196e7cbe64fSgw 
2197b7e50089Smaybee 	zvol_free_extents(zv);
2198b7e50089Smaybee 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2199b7e50089Smaybee 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2200681d9761SEric Taylor 	/* wait for dmu_free_long_range to actually free the blocks */
2201681d9761SEric Taylor 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2202681d9761SEric Taylor 	tx = dmu_tx_create(os);
2203681d9761SEric Taylor 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2204681d9761SEric Taylor 	error = dmu_tx_assign(tx, TXG_WAIT);
2205681d9761SEric Taylor 	if (error) {
2206681d9761SEric Taylor 		dmu_tx_abort(tx);
2207681d9761SEric Taylor 		return (error);
2208681d9761SEric Taylor 	}
2209b24ab676SJeff Bonwick 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2210b24ab676SJeff Bonwick 		zv->zv_volblocksize = vbs;
2211681d9761SEric Taylor 	dmu_tx_commit(tx);
2212b7e50089Smaybee 
2213e7cbe64fSgw 	return (0);
2214e7cbe64fSgw }
2215