xref: /illumos-gate/usr/src/uts/common/fs/zfs/zvol.c (revision 8bf394f116a79c011b8f9f3bd199e09b363742ef)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
22f80ce222SChris Kirby  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23b77b9231SDan McDonald  *
24b77b9231SDan McDonald  * Portions Copyright 2010 Robert Milkowski
25b77b9231SDan McDonald  *
26047c81d3SSaso Kiselkov  * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
27b7edcb94SMatthew Ahrens  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
28c3d26abcSMatthew Ahrens  * Copyright (c) 2014 Integros [integros.com]
29455e370cSJohn Levon  * Copyright 2019 Joyent, Inc.
30fa9e4066Sahrens  */
31fa9e4066Sahrens 
32fa9e4066Sahrens /*
33fa9e4066Sahrens  * ZFS volume emulation driver.
34fa9e4066Sahrens  *
35fa9e4066Sahrens  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
36fa9e4066Sahrens  * Volumes are accessed through the symbolic links named:
37fa9e4066Sahrens  *
38fa9e4066Sahrens  * /dev/zvol/dsk/<pool_name>/<dataset_name>
39fa9e4066Sahrens  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
40fa9e4066Sahrens  *
41681d9761SEric Taylor  * These links are created by the /dev filesystem (sdev_zvolops.c).
42fa9e4066Sahrens  * Volumes are persistent through reboot.  No user command needs to be
43fa9e4066Sahrens  * run before opening and using a device.
44fa9e4066Sahrens  */
45fa9e4066Sahrens 
46fa9e4066Sahrens #include <sys/types.h>
47fa9e4066Sahrens #include <sys/param.h>
48fa9e4066Sahrens #include <sys/errno.h>
49fa9e4066Sahrens #include <sys/uio.h>
50fa9e4066Sahrens #include <sys/buf.h>
51fa9e4066Sahrens #include <sys/modctl.h>
52fa9e4066Sahrens #include <sys/open.h>
53fa9e4066Sahrens #include <sys/kmem.h>
54fa9e4066Sahrens #include <sys/conf.h>
55fa9e4066Sahrens #include <sys/cmn_err.h>
56fa9e4066Sahrens #include <sys/stat.h>
57fa9e4066Sahrens #include <sys/zap.h>
58fa9e4066Sahrens #include <sys/spa.h>
59810e43b2SBill Pijewski #include <sys/spa_impl.h>
60fa9e4066Sahrens #include <sys/zio.h>
61e7cbe64fSgw #include <sys/dmu_traverse.h>
62e7cbe64fSgw #include <sys/dnode.h>
63e7cbe64fSgw #include <sys/dsl_dataset.h>
64fa9e4066Sahrens #include <sys/dsl_prop.h>
65fa9e4066Sahrens #include <sys/dkio.h>
66fa9e4066Sahrens #include <sys/efi_partition.h>
67fa9e4066Sahrens #include <sys/byteorder.h>
68fa9e4066Sahrens #include <sys/pathname.h>
69fa9e4066Sahrens #include <sys/ddi.h>
70fa9e4066Sahrens #include <sys/sunddi.h>
71fa9e4066Sahrens #include <sys/crc32.h>
72fa9e4066Sahrens #include <sys/dirent.h>
73fa9e4066Sahrens #include <sys/policy.h>
74fa9e4066Sahrens #include <sys/fs/zfs.h>
75fa9e4066Sahrens #include <sys/zfs_ioctl.h>
76fa9e4066Sahrens #include <sys/mkdev.h>
7722ac5be4Sperrin #include <sys/zil.h>
78c5c6ffa0Smaybee #include <sys/refcount.h>
79c2e6a7d6Sperrin #include <sys/zfs_znode.h>
80c2e6a7d6Sperrin #include <sys/zfs_rlock.h>
81e7cbe64fSgw #include <sys/vdev_disk.h>
82e7cbe64fSgw #include <sys/vdev_impl.h>
83810e43b2SBill Pijewski #include <sys/vdev_raidz.h>
84e7cbe64fSgw #include <sys/zvol.h>
85e7cbe64fSgw #include <sys/dumphdr.h>
861209a471SNeil Perrin #include <sys/zil_impl.h>
8780901aeaSGeorge Wilson #include <sys/dbuf.h>
88810e43b2SBill Pijewski #include <sys/dmu_tx.h>
89810e43b2SBill Pijewski #include <sys/zfeature.h>
90810e43b2SBill Pijewski #include <sys/zio_checksum.h>
92c3377ee9SJohn Levon #include <sys/smt.h>
93047c81d3SSaso Kiselkov #include <sys/dkioc_free_util.h>
95fa9e4066Sahrens 
96fa9e4066Sahrens #include "zfs_namecheck.h"
97fa9e4066Sahrens 
98c99e4bdcSChris Kirby void *zfsdev_state;
99503ad85cSMatthew Ahrens static char *zvol_tag = "zvol_tag";
100fa9e4066Sahrens 
101e7cbe64fSgw #define	ZVOL_DUMPSIZE		"dumpsize"
102e7cbe64fSgw 
103fa9e4066Sahrens /*
104c99e4bdcSChris Kirby  * This lock protects the zfsdev_state structure from being modified
105fa9e4066Sahrens  * while it's being used, e.g. an open that comes in before a create
106fa9e4066Sahrens  * finishes.  It also protects temporary opens of the dataset so that,
107fa9e4066Sahrens  * e.g., an open doesn't get a spurious EBUSY.
108fa9e4066Sahrens  */
109c99e4bdcSChris Kirby kmutex_t zfsdev_state_lock;
110fa9e4066Sahrens static uint32_t zvol_minors;
111fa9e4066Sahrens 
112e7cbe64fSgw typedef struct zvol_extent {
11388b7b0f2SMatthew Ahrens 	list_node_t	ze_node;
114e7cbe64fSgw 	dva_t		ze_dva;		/* dva associated with this extent */
11588b7b0f2SMatthew Ahrens 	uint64_t	ze_nblks;	/* number of blocks in extent */
116e7cbe64fSgw } zvol_extent_t;
117e7cbe64fSgw 
118fa9e4066Sahrens /*
119fa9e4066Sahrens  * The in-core state of each volume.
120fa9e4066Sahrens  */
121fa9e4066Sahrens typedef struct zvol_state {
122fa9e4066Sahrens 	char		zv_name[MAXPATHLEN]; /* pool/dd name */
123fa9e4066Sahrens 	uint64_t	zv_volsize;	/* amount of space we advertise */
12467bd71c6Sperrin 	uint64_t	zv_volblocksize; /* volume block size */
125fa9e4066Sahrens 	minor_t		zv_minor;	/* minor number */
126fa9e4066Sahrens 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
127701f66c4SEric Taylor 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
128fa9e4066Sahrens 	objset_t	*zv_objset;	/* objset handle */
129fa9e4066Sahrens 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
130fa9e4066Sahrens 	uint32_t	zv_total_opens;	/* total open count */
13122ac5be4Sperrin 	zilog_t		*zv_zilog;	/* ZIL handle */
13288b7b0f2SMatthew Ahrens 	list_t		zv_extents;	/* List of extents for dump */
13379315247SMatthew Ahrens 	rangelock_t	zv_rangelock;
1348dfe5547SRichard Yao 	dnode_t		*zv_dn;		/* dnode hold */
135fa9e4066Sahrens } zvol_state_t;
136fa9e4066Sahrens 
137e7cbe64fSgw /*
138e7cbe64fSgw  * zvol specific flags
139e7cbe64fSgw  */
140e7cbe64fSgw #define	ZVOL_RDONLY	0x1
141e7cbe64fSgw #define	ZVOL_DUMPIFIED	0x2
142c7f714e2SEric Taylor #define	ZVOL_EXCL	0x4
143701f66c4SEric Taylor #define	ZVOL_WCE	0x8
144e7cbe64fSgw 
14567bd71c6Sperrin /*
14667bd71c6Sperrin  * zvol maximum transfer in one DMU tx.
14767bd71c6Sperrin  */
14867bd71c6Sperrin int zvol_maxphys = DMU_MAX_ACCESS/2;
14967bd71c6Sperrin 
150893c83baSGeorge Wilson /*
151893c83baSGeorge Wilson  * Toggle unmap functionality.
152893c83baSGeorge Wilson  */
153893c83baSGeorge Wilson boolean_t zvol_unmap_enabled = B_TRUE;
154893c83baSGeorge Wilson 
1551c9272b8SStephen Blinick /*
1561c9272b8SStephen Blinick  * If true, unmaps requested as synchronous are executed synchronously;
1571c9272b8SStephen Blinick  * otherwise, all unmaps are asynchronous.
1581c9272b8SStephen Blinick  */
1591c9272b8SStephen Blinick boolean_t zvol_unmap_sync_enabled = B_FALSE;
1601c9272b8SStephen Blinick 
16192241e0bSTom Erickson extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
1624445fffbSMatthew Ahrens     nvlist_t *, nvlist_t *);
163681d9761SEric Taylor static int zvol_remove_zv(zvol_state_t *);
1641271e4b1SPrakash Surya static int zvol_get_data(void *arg, lr_write_t *lr, char *buf,
1651271e4b1SPrakash Surya     struct lwb *lwb, zio_t *zio);
166e7cbe64fSgw static int zvol_dumpify(zvol_state_t *zv);
167e7cbe64fSgw static int zvol_dump_fini(zvol_state_t *zv);
168e7cbe64fSgw static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
16967bd71c6Sperrin 
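/*
 * Update the volume's advertised size: record it in the soft state,
 * refresh the "Size" and "Nblocks" device properties, and tell specfs
 * to drop its cached size for both the block and character nodes.
 */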
170fa9e4066Sahrens static void
171c61ea566SGeorge Wilson zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
172fa9e4066Sahrens {
173c61ea566SGeorge Wilson 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
174fa9e4066Sahrens 
175c61ea566SGeorge Wilson 	zv->zv_volsize = volsize;
176fa9e4066Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
177681d9761SEric Taylor 	    "Size", volsize) == DDI_SUCCESS);
178fa9e4066Sahrens 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
179681d9761SEric Taylor 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
180e7cbe64fSgw 
181e7cbe64fSgw 	/* Notify specfs to invalidate the cached size */
182e7cbe64fSgw 	spec_size_invalidate(dev, VBLK);
183e7cbe64fSgw 	spec_size_invalidate(dev, VCHR);
184fa9e4066Sahrens }
185fa9e4066Sahrens 
186fa9e4066Sahrens int
187e9dbad6fSeschrock zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
188fa9e4066Sahrens {
189e9dbad6fSeschrock 	if (volsize == 0)
190be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
191fa9e4066Sahrens 
192e9dbad6fSeschrock 	if (volsize % blocksize != 0)
193be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
1945c5460e9Seschrock 
195fa9e4066Sahrens #ifdef _ILP32
196e9dbad6fSeschrock 	if (volsize - 1 > SPEC_MAXOFFSET_T)
197be6fd75aSMatthew Ahrens 		return (SET_ERROR(EOVERFLOW));
198fa9e4066Sahrens #endif
199fa9e4066Sahrens 	return (0);
200fa9e4066Sahrens }
201fa9e4066Sahrens 
202fa9e4066Sahrens int
203e9dbad6fSeschrock zvol_check_volblocksize(uint64_t volblocksize)
204fa9e4066Sahrens {
205e9dbad6fSeschrock 	if (volblocksize < SPA_MINBLOCKSIZE ||
206b5152584SMatthew Ahrens 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
207e9dbad6fSeschrock 	    !ISP2(volblocksize))
208be6fd75aSMatthew Ahrens 		return (SET_ERROR(EDOM));
209fa9e4066Sahrens 
210fa9e4066Sahrens 	return (0);
211fa9e4066Sahrens }
212fa9e4066Sahrens 
213fa9e4066Sahrens int
214a2eea2e1Sahrens zvol_get_stats(objset_t *os, nvlist_t *nv)
215fa9e4066Sahrens {
216fa9e4066Sahrens 	int error;
217fa9e4066Sahrens 	dmu_object_info_t doi;
218a2eea2e1Sahrens 	uint64_t val;
219fa9e4066Sahrens 
220a2eea2e1Sahrens 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
221fa9e4066Sahrens 	if (error)
222fa9e4066Sahrens 		return (error);
223fa9e4066Sahrens 
224a2eea2e1Sahrens 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
225a2eea2e1Sahrens 
226fa9e4066Sahrens 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
227fa9e4066Sahrens 
228a2eea2e1Sahrens 	if (error == 0) {
229a2eea2e1Sahrens 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
230a2eea2e1Sahrens 		    doi.doi_data_block_size);
231a2eea2e1Sahrens 	}
232fa9e4066Sahrens 
233fa9e4066Sahrens 	return (error);
234fa9e4066Sahrens }
235fa9e4066Sahrens 
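/*
 * Find the zvol_state_t for the given dataset name by scanning the
 * allocated minors; returns NULL if no minor exists for the name.
 * The caller must hold zfsdev_state_lock.
 */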
236fa9e4066Sahrens static zvol_state_t *
237e9dbad6fSeschrock zvol_minor_lookup(const char *name)
238fa9e4066Sahrens {
239fa9e4066Sahrens 	minor_t minor;
240fa9e4066Sahrens 	zvol_state_t *zv;
241fa9e4066Sahrens 
242c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
243fa9e4066Sahrens 
244c99e4bdcSChris Kirby 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
245c99e4bdcSChris Kirby 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
246fa9e4066Sahrens 		if (zv == NULL)
247fa9e4066Sahrens 			continue;
248fa9e4066Sahrens 		if (strcmp(zv->zv_name, name) == 0)
249f80ce222SChris Kirby 			return (zv);
250fa9e4066Sahrens 	}
251fa9e4066Sahrens 
252f80ce222SChris Kirby 	return (NULL);
253fa9e4066Sahrens }
254fa9e4066Sahrens 
255e7cbe64fSgw /* extent mapping arg */
256e7cbe64fSgw struct maparg {
25788b7b0f2SMatthew Ahrens 	zvol_state_t	*ma_zv;
25888b7b0f2SMatthew Ahrens 	uint64_t	ma_blks;
259e7cbe64fSgw };
260e7cbe64fSgw 
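/*
 * traverse_dataset() callback used by zvol_get_lbas(): record each
 * block's DVA in the zv_extents list, merging physically contiguous
 * blocks into a single extent.  Gang blocks cannot be mapped, so the
 * traversal aborts with EFRAGS if one is encountered.
 */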
261e7cbe64fSgw /*ARGSUSED*/
262e7cbe64fSgw static int
2631b912ec7SGeorge Wilson zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
2647802d7bfSMatthew Ahrens     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
265e7cbe64fSgw {
26688b7b0f2SMatthew Ahrens 	struct maparg *ma = arg;
26788b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
26888b7b0f2SMatthew Ahrens 	int bs = ma->ma_zv->zv_volblocksize;
269e7cbe64fSgw 
270a2cdcdd2SPaul Dagnelie 	if (bp == NULL || BP_IS_HOLE(bp) ||
27143466aaeSMax Grossman 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
27288b7b0f2SMatthew Ahrens 		return (0);
273e7cbe64fSgw 
2745d7b4d43SMatthew Ahrens 	VERIFY(!BP_IS_EMBEDDED(bp));
2755d7b4d43SMatthew Ahrens 
27688b7b0f2SMatthew Ahrens 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
27788b7b0f2SMatthew Ahrens 	ma->ma_blks++;
278e7cbe64fSgw 
27988b7b0f2SMatthew Ahrens 	/* Abort immediately if we have encountered gang blocks */
28088b7b0f2SMatthew Ahrens 	if (BP_IS_GANG(bp))
281be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFRAGS));
282e7cbe64fSgw 
28388b7b0f2SMatthew Ahrens 	/*
28488b7b0f2SMatthew Ahrens 	 * See if the block is at the end of the previous extent.
28588b7b0f2SMatthew Ahrens 	 */
28688b7b0f2SMatthew Ahrens 	ze = list_tail(&ma->ma_zv->zv_extents);
28788b7b0f2SMatthew Ahrens 	if (ze &&
28888b7b0f2SMatthew Ahrens 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
28988b7b0f2SMatthew Ahrens 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
29088b7b0f2SMatthew Ahrens 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
29188b7b0f2SMatthew Ahrens 		ze->ze_nblks++;
29288b7b0f2SMatthew Ahrens 		return (0);
293e7cbe64fSgw 	}
294e7cbe64fSgw 
29588b7b0f2SMatthew Ahrens 	dprintf_bp(bp, "%s", "next blkptr:");
296e7cbe64fSgw 
29788b7b0f2SMatthew Ahrens 	/* start a new extent */
29888b7b0f2SMatthew Ahrens 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
29988b7b0f2SMatthew Ahrens 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
30088b7b0f2SMatthew Ahrens 	ze->ze_nblks = 1;
30188b7b0f2SMatthew Ahrens 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
30288b7b0f2SMatthew Ahrens 	return (0);
30388b7b0f2SMatthew Ahrens }
304e7cbe64fSgw 
30588b7b0f2SMatthew Ahrens static void
30688b7b0f2SMatthew Ahrens zvol_free_extents(zvol_state_t *zv)
30788b7b0f2SMatthew Ahrens {
30888b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
309e7cbe64fSgw 
31088b7b0f2SMatthew Ahrens 	while (ze = list_head(&zv->zv_extents)) {
31188b7b0f2SMatthew Ahrens 		list_remove(&zv->zv_extents, ze);
31288b7b0f2SMatthew Ahrens 		kmem_free(ze, sizeof (zvol_extent_t));
313e7cbe64fSgw 	}
31488b7b0f2SMatthew Ahrens }
315e7cbe64fSgw 
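/*
 * Rebuild the list of physical extents backing the volume by walking
 * the dataset.  The extent list is what lets the dump path address the
 * volume's blocks without going through the DMU.
 */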
31688b7b0f2SMatthew Ahrens static int
31788b7b0f2SMatthew Ahrens zvol_get_lbas(zvol_state_t *zv)
31888b7b0f2SMatthew Ahrens {
3193adc9019SEric Taylor 	objset_t *os = zv->zv_objset;
32088b7b0f2SMatthew Ahrens 	struct maparg	ma;
32188b7b0f2SMatthew Ahrens 	int		err;
32288b7b0f2SMatthew Ahrens 
32388b7b0f2SMatthew Ahrens 	ma.ma_zv = zv;
32488b7b0f2SMatthew Ahrens 	ma.ma_blks = 0;
32588b7b0f2SMatthew Ahrens 	zvol_free_extents(zv);
32688b7b0f2SMatthew Ahrens 
3273adc9019SEric Taylor 	/* commit any in-flight changes before traversing the dataset */
3283adc9019SEric Taylor 	txg_wait_synced(dmu_objset_pool(os), 0);
3293adc9019SEric Taylor 	err = traverse_dataset(dmu_objset_ds(os), 0,
33088b7b0f2SMatthew Ahrens 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
33188b7b0f2SMatthew Ahrens 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
33288b7b0f2SMatthew Ahrens 		zvol_free_extents(zv);
33388b7b0f2SMatthew Ahrens 		return (err ? err : EIO);
334e7cbe64fSgw 	}
33588b7b0f2SMatthew Ahrens 
336e7cbe64fSgw 	return (0);
337e7cbe64fSgw }
338e7cbe64fSgw 
339ecd6cf80Smarks /* ARGSUSED */
340fa9e4066Sahrens void
341ecd6cf80Smarks zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
342fa9e4066Sahrens {
343da6c28aaSamw 	zfs_creat_t *zct = arg;
344da6c28aaSamw 	nvlist_t *nvprops = zct->zct_props;
345fa9e4066Sahrens 	int error;
346e9dbad6fSeschrock 	uint64_t volblocksize, volsize;
347fa9e4066Sahrens 
348ecd6cf80Smarks 	VERIFY(nvlist_lookup_uint64(nvprops,
349e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
350ecd6cf80Smarks 	if (nvlist_lookup_uint64(nvprops,
351e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
352e9dbad6fSeschrock 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
353e9dbad6fSeschrock 
354e9dbad6fSeschrock 	/*
355e7cbe64fSgw 	 * These properties must be removed from the list so the generic
356e9dbad6fSeschrock 	 * property setting step won't apply to them.
357e9dbad6fSeschrock 	 */
358ecd6cf80Smarks 	VERIFY(nvlist_remove_all(nvprops,
359e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
360ecd6cf80Smarks 	(void) nvlist_remove_all(nvprops,
361e9dbad6fSeschrock 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
362e9dbad6fSeschrock 
363e9dbad6fSeschrock 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
364fa9e4066Sahrens 	    DMU_OT_NONE, 0, tx);
365fa9e4066Sahrens 	ASSERT(error == 0);
366fa9e4066Sahrens 
367fa9e4066Sahrens 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
368fa9e4066Sahrens 	    DMU_OT_NONE, 0, tx);
369fa9e4066Sahrens 	ASSERT(error == 0);
370fa9e4066Sahrens 
371e9dbad6fSeschrock 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
372fa9e4066Sahrens 	ASSERT(error == 0);
373fa9e4066Sahrens }
374fa9e4066Sahrens 
375b77b9231SDan McDonald /*
376b77b9231SDan McDonald  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
377b77b9231SDan McDonald  * implement DKIOCFREE/free-long-range.
378b77b9231SDan McDonald  */
379b77b9231SDan McDonald static int
3803f7978d0SAlan Somers zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
381b77b9231SDan McDonald {
3823f7978d0SAlan Somers 	zvol_state_t *zv = arg1;
3833f7978d0SAlan Somers 	lr_truncate_t *lr = arg2;
384b77b9231SDan McDonald 	uint64_t offset, length;
385b77b9231SDan McDonald 
386b77b9231SDan McDonald 	if (byteswap)
387b77b9231SDan McDonald 		byteswap_uint64_array(lr, sizeof (*lr));
388b77b9231SDan McDonald 
389b77b9231SDan McDonald 	offset = lr->lr_offset;
390b77b9231SDan McDonald 	length = lr->lr_length;
391b77b9231SDan McDonald 
392b77b9231SDan McDonald 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
393b77b9231SDan McDonald }
394b77b9231SDan McDonald 
39522ac5be4Sperrin /*
39622ac5be4Sperrin  * Replay a TX_WRITE ZIL transaction that didn't get committed
39722ac5be4Sperrin  * after a system failure
39822ac5be4Sperrin  */
399eb633035STom Caputi /* ARGSUSED */
40022ac5be4Sperrin static int
4013f7978d0SAlan Somers zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
40222ac5be4Sperrin {
4033f7978d0SAlan Somers 	zvol_state_t *zv = arg1;
4043f7978d0SAlan Somers 	lr_write_t *lr = arg2;
40522ac5be4Sperrin 	objset_t *os = zv->zv_objset;
40622ac5be4Sperrin 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
407b24ab676SJeff Bonwick 	uint64_t offset, length;
40822ac5be4Sperrin 	dmu_tx_t *tx;
40922ac5be4Sperrin 	int error;
41022ac5be4Sperrin 
41122ac5be4Sperrin 	if (byteswap)
41222ac5be4Sperrin 		byteswap_uint64_array(lr, sizeof (*lr));
41322ac5be4Sperrin 
414b24ab676SJeff Bonwick 	offset = lr->lr_offset;
415b24ab676SJeff Bonwick 	length = lr->lr_length;
416b24ab676SJeff Bonwick 
417b24ab676SJeff Bonwick 	/* If it's a dmu_sync() block, write the whole block */
418b24ab676SJeff Bonwick 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
419b24ab676SJeff Bonwick 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
420b24ab676SJeff Bonwick 		if (length < blocksize) {
421b24ab676SJeff Bonwick 			offset -= offset % blocksize;
422b24ab676SJeff Bonwick 			length = blocksize;
423b24ab676SJeff Bonwick 		}
424b24ab676SJeff Bonwick 	}
425975c32a0SNeil Perrin 
42622ac5be4Sperrin 	tx = dmu_tx_create(os);
427b24ab676SJeff Bonwick 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
4281209a471SNeil Perrin 	error = dmu_tx_assign(tx, TXG_WAIT);
42922ac5be4Sperrin 	if (error) {
43022ac5be4Sperrin 		dmu_tx_abort(tx);
43122ac5be4Sperrin 	} else {
432b24ab676SJeff Bonwick 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
43322ac5be4Sperrin 		dmu_tx_commit(tx);
43422ac5be4Sperrin 	}
43522ac5be4Sperrin 
43622ac5be4Sperrin 	return (error);
43722ac5be4Sperrin }
43822ac5be4Sperrin 
43922ac5be4Sperrin /* ARGSUSED */
44022ac5be4Sperrin static int
4413f7978d0SAlan Somers zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
44222ac5be4Sperrin {
443be6fd75aSMatthew Ahrens 	return (SET_ERROR(ENOTSUP));
44422ac5be4Sperrin }
44522ac5be4Sperrin 
44622ac5be4Sperrin /*
44722ac5be4Sperrin  * Callback vectors for replaying records.
448b77b9231SDan McDonald  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
44922ac5be4Sperrin  */
45022ac5be4Sperrin zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
45122ac5be4Sperrin 	zvol_replay_err,	/* 0 no such transaction type */
45222ac5be4Sperrin 	zvol_replay_err,	/* TX_CREATE */
45322ac5be4Sperrin 	zvol_replay_err,	/* TX_MKDIR */
45422ac5be4Sperrin 	zvol_replay_err,	/* TX_MKXATTR */
45522ac5be4Sperrin 	zvol_replay_err,	/* TX_SYMLINK */
45622ac5be4Sperrin 	zvol_replay_err,	/* TX_REMOVE */
45722ac5be4Sperrin 	zvol_replay_err,	/* TX_RMDIR */
45822ac5be4Sperrin 	zvol_replay_err,	/* TX_LINK */
45922ac5be4Sperrin 	zvol_replay_err,	/* TX_RENAME */
46022ac5be4Sperrin 	zvol_replay_write,	/* TX_WRITE */
461b77b9231SDan McDonald 	zvol_replay_truncate,	/* TX_TRUNCATE */
46222ac5be4Sperrin 	zvol_replay_err,	/* TX_SETATTR */
46322ac5be4Sperrin 	zvol_replay_err,	/* TX_ACL */
464975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ACL */
465975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ATTR */
466975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
467975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ACL */
468975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ATTR */
469975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
470975c32a0SNeil Perrin 	zvol_replay_err,	/* TX_WRITE2 */
47122ac5be4Sperrin };
47222ac5be4Sperrin 
473681d9761SEric Taylor int
474681d9761SEric Taylor zvol_name2minor(const char *name, minor_t *minor)
475681d9761SEric Taylor {
476681d9761SEric Taylor 	zvol_state_t *zv;
477681d9761SEric Taylor 
478c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
479681d9761SEric Taylor 	zv = zvol_minor_lookup(name);
480681d9761SEric Taylor 	if (minor && zv)
481681d9761SEric Taylor 		*minor = zv->zv_minor;
482c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
483681d9761SEric Taylor 	return (zv ? 0 : -1);
484681d9761SEric Taylor }
485681d9761SEric Taylor 
486e7cbe64fSgw /*
487e7cbe64fSgw  * Create a minor node (plus a whole lot more) for the specified volume.
488fa9e4066Sahrens  */
489fa9e4066Sahrens int
490681d9761SEric Taylor zvol_create_minor(const char *name)
491fa9e4066Sahrens {
492c99e4bdcSChris Kirby 	zfs_soft_state_t *zs;
493fa9e4066Sahrens 	zvol_state_t *zv;
494fa9e4066Sahrens 	objset_t *os;
49567bd71c6Sperrin 	dmu_object_info_t doi;
496fa9e4066Sahrens 	minor_t minor = 0;
497fa9e4066Sahrens 	char chrbuf[30], blkbuf[30];
498fa9e4066Sahrens 	int error;
499fa9e4066Sahrens 
500c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
501fa9e4066Sahrens 
5021195e687SMark J Musante 	if (zvol_minor_lookup(name) != NULL) {
503c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
504be6fd75aSMatthew Ahrens 		return (SET_ERROR(EEXIST));
505fa9e4066Sahrens 	}
506fa9e4066Sahrens 
507503ad85cSMatthew Ahrens 	/* lie and say we're read-only */
508eb633035STom Caputi 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
509fa9e4066Sahrens 
510fa9e4066Sahrens 	if (error) {
511c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
512fa9e4066Sahrens 		return (error);
513fa9e4066Sahrens 	}
514fa9e4066Sahrens 
515c99e4bdcSChris Kirby 	if ((minor = zfsdev_minor_alloc()) == 0) {
516eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
517c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
518be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
519fa9e4066Sahrens 	}
520fa9e4066Sahrens 
521c99e4bdcSChris Kirby 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
522eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
523c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
524be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
525fa9e4066Sahrens 	}
526e9dbad6fSeschrock 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
527e9dbad6fSeschrock 	    (char *)name);
528fa9e4066Sahrens 
529681d9761SEric Taylor 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
530fa9e4066Sahrens 
531fa9e4066Sahrens 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
532fa9e4066Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
533c99e4bdcSChris Kirby 		ddi_soft_state_free(zfsdev_state, minor);
534eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
535c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
536be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
537fa9e4066Sahrens 	}
538fa9e4066Sahrens 
539681d9761SEric Taylor 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
540fa9e4066Sahrens 
541fa9e4066Sahrens 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
542fa9e4066Sahrens 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
543fa9e4066Sahrens 		ddi_remove_minor_node(zfs_dip, chrbuf);
544c99e4bdcSChris Kirby 		ddi_soft_state_free(zfsdev_state, minor);
545eb633035STom Caputi 		dmu_objset_disown(os, 1, FTAG);
546c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
547be6fd75aSMatthew Ahrens 		return (SET_ERROR(EAGAIN));
548fa9e4066Sahrens 	}
549fa9e4066Sahrens 
550c99e4bdcSChris Kirby 	zs = ddi_get_soft_state(zfsdev_state, minor);
551c99e4bdcSChris Kirby 	zs->zss_type = ZSST_ZVOL;
552c99e4bdcSChris Kirby 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
553681d9761SEric Taylor 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
554fa9e4066Sahrens 	zv->zv_min_bs = DEV_BSHIFT;
555fa9e4066Sahrens 	zv->zv_minor = minor;
556fa9e4066Sahrens 	zv->zv_objset = os;
557f9af39baSGeorge Wilson 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
558681d9761SEric Taylor 		zv->zv_flags |= ZVOL_RDONLY;
55979315247SMatthew Ahrens 	rangelock_init(&zv->zv_rangelock, NULL, NULL);
56088b7b0f2SMatthew Ahrens 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
56188b7b0f2SMatthew Ahrens 	    offsetof(zvol_extent_t, ze_node));
56267bd71c6Sperrin 	/* get and cache the blocksize */
56367bd71c6Sperrin 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
56467bd71c6Sperrin 	ASSERT(error == 0);
56567bd71c6Sperrin 	zv->zv_volblocksize = doi.doi_data_block_size;
56622ac5be4Sperrin 
567f9af39baSGeorge Wilson 	if (spa_writeable(dmu_objset_spa(os))) {
568f9af39baSGeorge Wilson 		if (zil_replay_disable)
569f9af39baSGeorge Wilson 			zil_destroy(dmu_objset_zil(os), B_FALSE);
570f9af39baSGeorge Wilson 		else
571f9af39baSGeorge Wilson 			zil_replay(os, zv, zvol_replay_vector);
572f9af39baSGeorge Wilson 	}
573eb633035STom Caputi 	dmu_objset_disown(os, 1, FTAG);
574681d9761SEric Taylor 	zv->zv_objset = NULL;
575fa9e4066Sahrens 
576fa9e4066Sahrens 	zvol_minors++;
577fa9e4066Sahrens 
578c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
579fa9e4066Sahrens 
580fa9e4066Sahrens 	return (0);
581fa9e4066Sahrens }
582fa9e4066Sahrens 
583fa9e4066Sahrens /*
584fa9e4066Sahrens  * Remove the minor node for the specified volume.
585fa9e4066Sahrens  */
586681d9761SEric Taylor static int
587681d9761SEric Taylor zvol_remove_zv(zvol_state_t *zv)
588681d9761SEric Taylor {
589681d9761SEric Taylor 	char nmbuf[20];
590c99e4bdcSChris Kirby 	minor_t minor = zv->zv_minor;
591681d9761SEric Taylor 
592c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
593681d9761SEric Taylor 	if (zv->zv_total_opens != 0)
594be6fd75aSMatthew Ahrens 		return (SET_ERROR(EBUSY));
595681d9761SEric Taylor 
596c99e4bdcSChris Kirby 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
597681d9761SEric Taylor 	ddi_remove_minor_node(zfs_dip, nmbuf);
598681d9761SEric Taylor 
599c99e4bdcSChris Kirby 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
600681d9761SEric Taylor 	ddi_remove_minor_node(zfs_dip, nmbuf);
601681d9761SEric Taylor 
60279315247SMatthew Ahrens 	rangelock_fini(&zv->zv_rangelock);
603681d9761SEric Taylor 
604c99e4bdcSChris Kirby 	kmem_free(zv, sizeof (zvol_state_t));
605c99e4bdcSChris Kirby 
606c99e4bdcSChris Kirby 	ddi_soft_state_free(zfsdev_state, minor);
607681d9761SEric Taylor 
608681d9761SEric Taylor 	zvol_minors--;
609681d9761SEric Taylor 	return (0);
610681d9761SEric Taylor }
611681d9761SEric Taylor 
612fa9e4066Sahrens int
613e9dbad6fSeschrock zvol_remove_minor(const char *name)
614fa9e4066Sahrens {
615fa9e4066Sahrens 	zvol_state_t *zv;
616681d9761SEric Taylor 	int rc;
617fa9e4066Sahrens 
618c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
619e9dbad6fSeschrock 	if ((zv = zvol_minor_lookup(name)) == NULL) {
620c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
621be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
622fa9e4066Sahrens 	}
623681d9761SEric Taylor 	rc = zvol_remove_zv(zv);
624c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
625681d9761SEric Taylor 	return (rc);
626681d9761SEric Taylor }
627fa9e4066Sahrens 
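/*
 * Called on the first open of a zvol: take a long-term hold on the
 * dataset, cache the current volume size, hold the ZVOL_OBJ dnode,
 * open the ZIL, and latch the effective read-only state.
 */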
628681d9761SEric Taylor int
629*8bf394f1STom Caputi zvol_first_open(zvol_state_t *zv, boolean_t rdonly)
630681d9761SEric Taylor {
631681d9761SEric Taylor 	objset_t *os;
632681d9761SEric Taylor 	uint64_t volsize;
633681d9761SEric Taylor 	int error;
634681d9761SEric Taylor 	uint64_t readonly;
635*8bf394f1STom Caputi 	boolean_t ro;
636fa9e4066Sahrens 
637*8bf394f1STom Caputi 	ro = (rdonly || (strchr(zv->zv_name, '@') != NULL));
638*8bf394f1STom Caputi 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
639681d9761SEric Taylor 	if (error)
640681d9761SEric Taylor 		return (error);
641fa9e4066Sahrens 
642c61ea566SGeorge Wilson 	zv->zv_objset = os;
643681d9761SEric Taylor 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
644681d9761SEric Taylor 	if (error) {
645681d9761SEric Taylor 		ASSERT(error == 0);
646eb633035STom Caputi 		dmu_objset_disown(os, 1, zvol_tag);
647681d9761SEric Taylor 		return (error);
648681d9761SEric Taylor 	}
649c61ea566SGeorge Wilson 
6508dfe5547SRichard Yao 	error = dnode_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dn);
65194d1a210STim Haley 	if (error) {
652eb633035STom Caputi 		dmu_objset_disown(os, 1, zvol_tag);
65394d1a210STim Haley 		return (error);
65494d1a210STim Haley 	}
655c61ea566SGeorge Wilson 
656c61ea566SGeorge Wilson 	zvol_size_changed(zv, volsize);
657681d9761SEric Taylor 	zv->zv_zilog = zil_open(os, zvol_get_data);
658fa9e4066Sahrens 
659681d9761SEric Taylor 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
660681d9761SEric Taylor 	    NULL) == 0);
661f9af39baSGeorge Wilson 	if (readonly || dmu_objset_is_snapshot(os) ||
662f9af39baSGeorge Wilson 	    !spa_writeable(dmu_objset_spa(os)))
663681d9761SEric Taylor 		zv->zv_flags |= ZVOL_RDONLY;
664681d9761SEric Taylor 	else
665681d9761SEric Taylor 		zv->zv_flags &= ~ZVOL_RDONLY;
666681d9761SEric Taylor 	return (error);
667681d9761SEric Taylor }
668fa9e4066Sahrens 
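/*
 * Undo zvol_first_open() when the last open goes away: close the ZIL,
 * release the dnode hold, push out or evict any cached data, and
 * disown the objset.
 */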
669681d9761SEric Taylor void
670681d9761SEric Taylor zvol_last_close(zvol_state_t *zv)
671681d9761SEric Taylor {
67222ac5be4Sperrin 	zil_close(zv->zv_zilog);
67322ac5be4Sperrin 	zv->zv_zilog = NULL;
6742e2c1355SMatthew Ahrens 
6758dfe5547SRichard Yao 	dnode_rele(zv->zv_dn, zvol_tag);
6768dfe5547SRichard Yao 	zv->zv_dn = NULL;
6772e2c1355SMatthew Ahrens 
6782e2c1355SMatthew Ahrens 	/*
6792e2c1355SMatthew Ahrens 	 * Evict cached data
6802e2c1355SMatthew Ahrens 	 */
6812e2c1355SMatthew Ahrens 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
6822e2c1355SMatthew Ahrens 	    !(zv->zv_flags & ZVOL_RDONLY))
6832e2c1355SMatthew Ahrens 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
6843b2aab18SMatthew Ahrens 	dmu_objset_evict_dbufs(zv->zv_objset);
6852e2c1355SMatthew Ahrens 
686eb633035STom Caputi 	dmu_objset_disown(zv->zv_objset, 1, zvol_tag);
687fa9e4066Sahrens 	zv->zv_objset = NULL;
688fa9e4066Sahrens }
689fa9e4066Sahrens 
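/*
 * Preallocate every block of the volume so that its storage is fully
 * materialized on disk; this is done when setting the volume up as a
 * dump device.
 */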
690e7cbe64fSgw int
691e7cbe64fSgw zvol_prealloc(zvol_state_t *zv)
692e7cbe64fSgw {
693e7cbe64fSgw 	objset_t *os = zv->zv_objset;
694e7cbe64fSgw 	dmu_tx_t *tx;
695e7cbe64fSgw 	uint64_t refd, avail, usedobjs, availobjs;
696e7cbe64fSgw 	uint64_t resid = zv->zv_volsize;
697e7cbe64fSgw 	uint64_t off = 0;
698e7cbe64fSgw 
699e7cbe64fSgw 	/* Check the space usage before attempting to allocate the space */
700e7cbe64fSgw 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
701e7cbe64fSgw 	if (avail < zv->zv_volsize)
702be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENOSPC));
703e7cbe64fSgw 
704e7cbe64fSgw 	/* Free old extents if they exist */
705e7cbe64fSgw 	zvol_free_extents(zv);
706e7cbe64fSgw 
707e7cbe64fSgw 	while (resid != 0) {
708e7cbe64fSgw 		int error;
709b5152584SMatthew Ahrens 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
710e7cbe64fSgw 
711e7cbe64fSgw 		tx = dmu_tx_create(os);
712e7cbe64fSgw 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
713e7cbe64fSgw 		error = dmu_tx_assign(tx, TXG_WAIT);
714e7cbe64fSgw 		if (error) {
715e7cbe64fSgw 			dmu_tx_abort(tx);
716cdb0ab79Smaybee 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
717e7cbe64fSgw 			return (error);
718e7cbe64fSgw 		}
71982c9918fSTim Haley 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
720e7cbe64fSgw 		dmu_tx_commit(tx);
721e7cbe64fSgw 		off += bytes;
722e7cbe64fSgw 		resid -= bytes;
723e7cbe64fSgw 	}
724e7cbe64fSgw 	txg_wait_synced(dmu_objset_pool(os), 0);
725e7cbe64fSgw 
726e7cbe64fSgw 	return (0);
727e7cbe64fSgw }
728e7cbe64fSgw 
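/*
 * Record the new volume size in the ZAP and free any blocks beyond it.
 * The size update is synced to disk before the excess blocks are freed.
 */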
7293b2aab18SMatthew Ahrens static int
730681d9761SEric Taylor zvol_update_volsize(objset_t *os, uint64_t volsize)
731e7cbe64fSgw {
732e7cbe64fSgw 	dmu_tx_t *tx;
733e7cbe64fSgw 	int error;
734eb633035STom Caputi 	uint64_t txg;
735e7cbe64fSgw 
736c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
737e7cbe64fSgw 
738681d9761SEric Taylor 	tx = dmu_tx_create(os);
739e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
7404bb73804SMatthew Ahrens 	dmu_tx_mark_netfree(tx);
741e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
742e7cbe64fSgw 	if (error) {
743e7cbe64fSgw 		dmu_tx_abort(tx);
744e7cbe64fSgw 		return (error);
745e7cbe64fSgw 	}
746eb633035STom Caputi 	txg = dmu_tx_get_txg(tx);
747e7cbe64fSgw 
748681d9761SEric Taylor 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
749e7cbe64fSgw 	    &volsize, tx);
750e7cbe64fSgw 	dmu_tx_commit(tx);
751e7cbe64fSgw 
752eb633035STom Caputi 	txg_wait_synced(dmu_objset_pool(os), txg);
753eb633035STom Caputi 
754e7cbe64fSgw 	if (error == 0)
755681d9761SEric Taylor 		error = dmu_free_long_range(os,
756cdb0ab79Smaybee 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
757681d9761SEric Taylor 	return (error);
758681d9761SEric Taylor }
759e7cbe64fSgw 
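/*
 * Remove the minor nodes of all zvols beneath the named dataset.
 */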
760681d9761SEric Taylor void
761681d9761SEric Taylor zvol_remove_minors(const char *name)
762681d9761SEric Taylor {
763681d9761SEric Taylor 	zvol_state_t *zv;
764681d9761SEric Taylor 	char *namebuf;
765681d9761SEric Taylor 	minor_t minor;
766681d9761SEric Taylor 
767681d9761SEric Taylor 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
768681d9761SEric Taylor 	(void) strncpy(namebuf, name, strlen(name));
769681d9761SEric Taylor 	(void) strcat(namebuf, "/");
770c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
771c99e4bdcSChris Kirby 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
772681d9761SEric Taylor 
773c99e4bdcSChris Kirby 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
774681d9761SEric Taylor 		if (zv == NULL)
775681d9761SEric Taylor 			continue;
776681d9761SEric Taylor 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
777681d9761SEric Taylor 			(void) zvol_remove_zv(zv);
778e7cbe64fSgw 	}
779681d9761SEric Taylor 	kmem_free(namebuf, strlen(name) + 2);
780681d9761SEric Taylor 
781c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
782e7cbe64fSgw }
783e7cbe64fSgw 
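/*
 * Apply a size change to an open (in-core) zvol: update the cached
 * size, resize the dump area if the volume has been dumpified, and
 * post a LUN expansion (ESC_DEV_DLE) sysevent on success.
 */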
784c61ea566SGeorge Wilson static int
7853b2aab18SMatthew Ahrens zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
786fa9e4066Sahrens {
787e7cbe64fSgw 	uint64_t old_volsize = 0ULL;
7883b2aab18SMatthew Ahrens 	int error = 0;
789fa9e4066Sahrens 
790c61ea566SGeorge Wilson 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
791c61ea566SGeorge Wilson 
792e7cbe64fSgw 	/*
793e7cbe64fSgw 	 * Reinitialize the dump area to the new size. If we
794681d9761SEric Taylor 	 * failed to resize the dump area then restore it back to
795c61ea566SGeorge Wilson 	 * its original size.  We must set the new volsize prior
796c61ea566SGeorge Wilson 	 * to calling dumpvp_resize() to ensure that the device's
797c61ea566SGeorge Wilson 	 * size(9P) is not visible to the dump subsystem.
798e7cbe64fSgw 	 */
7993b2aab18SMatthew Ahrens 	old_volsize = zv->zv_volsize;
8003b2aab18SMatthew Ahrens 	zvol_size_changed(zv, volsize);
8013b2aab18SMatthew Ahrens 
8023b2aab18SMatthew Ahrens 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
8033b2aab18SMatthew Ahrens 		if ((error = zvol_dumpify(zv)) != 0 ||
8043b2aab18SMatthew Ahrens 		    (error = dumpvp_resize()) != 0) {
8053b2aab18SMatthew Ahrens 			int dumpify_error;
8063b2aab18SMatthew Ahrens 
8073b2aab18SMatthew Ahrens 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
8083b2aab18SMatthew Ahrens 			zvol_size_changed(zv, old_volsize);
8093b2aab18SMatthew Ahrens 			dumpify_error = zvol_dumpify(zv);
8103b2aab18SMatthew Ahrens 			error = dumpify_error ? dumpify_error : error;
811681d9761SEric Taylor 		}
812fa9e4066Sahrens 	}
813fa9e4066Sahrens 
814573ca77eSGeorge Wilson 	/*
815573ca77eSGeorge Wilson 	 * Generate a LUN expansion event.
816573ca77eSGeorge Wilson 	 */
8173b2aab18SMatthew Ahrens 	if (error == 0) {
818573ca77eSGeorge Wilson 		sysevent_id_t eid;
819573ca77eSGeorge Wilson 		nvlist_t *attr;
820573ca77eSGeorge Wilson 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
821573ca77eSGeorge Wilson 
822681d9761SEric Taylor 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
823573ca77eSGeorge Wilson 		    zv->zv_minor);
824573ca77eSGeorge Wilson 
825573ca77eSGeorge Wilson 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
826573ca77eSGeorge Wilson 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
827573ca77eSGeorge Wilson 
828573ca77eSGeorge Wilson 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
829573ca77eSGeorge Wilson 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
830573ca77eSGeorge Wilson 
831573ca77eSGeorge Wilson 		nvlist_free(attr);
832573ca77eSGeorge Wilson 		kmem_free(physpath, MAXPATHLEN);
833573ca77eSGeorge Wilson 	}
834c61ea566SGeorge Wilson 	return (error);
835c61ea566SGeorge Wilson }
836573ca77eSGeorge Wilson 
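/*
 * Entry point for changing a volume's size: validate the new size
 * against the volume block size, update the on-disk ZAP, and, if a
 * minor exists, update the live state as well.
 */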
837c61ea566SGeorge Wilson int
838c61ea566SGeorge Wilson zvol_set_volsize(const char *name, uint64_t volsize)
839c61ea566SGeorge Wilson {
840c61ea566SGeorge Wilson 	zvol_state_t *zv = NULL;
841c61ea566SGeorge Wilson 	objset_t *os;
842c61ea566SGeorge Wilson 	int error;
843c61ea566SGeorge Wilson 	dmu_object_info_t doi;
844c61ea566SGeorge Wilson 	uint64_t readonly;
8453b2aab18SMatthew Ahrens 	boolean_t owned = B_FALSE;
8463b2aab18SMatthew Ahrens 
8473b2aab18SMatthew Ahrens 	error = dsl_prop_get_integer(name,
8483b2aab18SMatthew Ahrens 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
8493b2aab18SMatthew Ahrens 	if (error != 0)
8503b2aab18SMatthew Ahrens 		return (error);
8513b2aab18SMatthew Ahrens 	if (readonly)
852be6fd75aSMatthew Ahrens 		return (SET_ERROR(EROFS));
853c61ea566SGeorge Wilson 
854c61ea566SGeorge Wilson 	mutex_enter(&zfsdev_state_lock);
855c61ea566SGeorge Wilson 	zv = zvol_minor_lookup(name);
8563b2aab18SMatthew Ahrens 
8573b2aab18SMatthew Ahrens 	if (zv == NULL || zv->zv_objset == NULL) {
858eb633035STom Caputi 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
8593b2aab18SMatthew Ahrens 		    FTAG, &os)) != 0) {
8603b2aab18SMatthew Ahrens 			mutex_exit(&zfsdev_state_lock);
8613b2aab18SMatthew Ahrens 			return (error);
8623b2aab18SMatthew Ahrens 		}
8633b2aab18SMatthew Ahrens 		owned = B_TRUE;
8643b2aab18SMatthew Ahrens 		if (zv != NULL)
8653b2aab18SMatthew Ahrens 			zv->zv_objset = os;
8663b2aab18SMatthew Ahrens 	} else {
8673b2aab18SMatthew Ahrens 		os = zv->zv_objset;
868c61ea566SGeorge Wilson 	}
869c61ea566SGeorge Wilson 
870c61ea566SGeorge Wilson 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
8713b2aab18SMatthew Ahrens 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
872c61ea566SGeorge Wilson 		goto out;
873c61ea566SGeorge Wilson 
8743b2aab18SMatthew Ahrens 	error = zvol_update_volsize(os, volsize);
875c61ea566SGeorge Wilson 
8763b2aab18SMatthew Ahrens 	if (error == 0 && zv != NULL)
8773b2aab18SMatthew Ahrens 		error = zvol_update_live_volsize(zv, volsize);
878bb0ade09Sahrens out:
8793b2aab18SMatthew Ahrens 	if (owned) {
880eb633035STom Caputi 		dmu_objset_disown(os, B_TRUE, FTAG);
8813b2aab18SMatthew Ahrens 		if (zv != NULL)
8823b2aab18SMatthew Ahrens 			zv->zv_objset = NULL;
8833b2aab18SMatthew Ahrens 	}
884c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
885fa9e4066Sahrens 	return (error);
886fa9e4066Sahrens }
887fa9e4066Sahrens 
888fa9e4066Sahrens /*ARGSUSED*/
889fa9e4066Sahrens int
890fa9e4066Sahrens zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
891fa9e4066Sahrens {
892fa9e4066Sahrens 	zvol_state_t *zv;
893681d9761SEric Taylor 	int err = 0;
894fa9e4066Sahrens 
895c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
896fa9e4066Sahrens 
897c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
898fa9e4066Sahrens 	if (zv == NULL) {
899c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
900be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
901fa9e4066Sahrens 	}
902fa9e4066Sahrens 
903681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
904*8bf394f1STom Caputi 		err = zvol_first_open(zv, !(flag & FWRITE));
905681d9761SEric Taylor 	if (err) {
906c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
907681d9761SEric Taylor 		return (err);
908681d9761SEric Taylor 	}
909*8bf394f1STom Caputi 
910*8bf394f1STom Caputi 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
911be6fd75aSMatthew Ahrens 		err = SET_ERROR(EROFS);
912681d9761SEric Taylor 		goto out;
913fa9e4066Sahrens 	}
914c7f714e2SEric Taylor 	if (zv->zv_flags & ZVOL_EXCL) {
915be6fd75aSMatthew Ahrens 		err = SET_ERROR(EBUSY);
916681d9761SEric Taylor 		goto out;
917c7f714e2SEric Taylor 	}
918c7f714e2SEric Taylor 	if (flag & FEXCL) {
919c7f714e2SEric Taylor 		if (zv->zv_total_opens != 0) {
920be6fd75aSMatthew Ahrens 			err = SET_ERROR(EBUSY);
921681d9761SEric Taylor 			goto out;
922c7f714e2SEric Taylor 		}
923c7f714e2SEric Taylor 		zv->zv_flags |= ZVOL_EXCL;
924c7f714e2SEric Taylor 	}
925fa9e4066Sahrens 
926fa9e4066Sahrens 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
927fa9e4066Sahrens 		zv->zv_open_count[otyp]++;
928fa9e4066Sahrens 		zv->zv_total_opens++;
929fa9e4066Sahrens 	}
930c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
931fa9e4066Sahrens 
932681d9761SEric Taylor 	return (err);
933681d9761SEric Taylor out:
934681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
935681d9761SEric Taylor 		zvol_last_close(zv);
936c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
937681d9761SEric Taylor 	return (err);
938fa9e4066Sahrens }
939fa9e4066Sahrens 
940fa9e4066Sahrens /*ARGSUSED*/
941fa9e4066Sahrens int
942fa9e4066Sahrens zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
943fa9e4066Sahrens {
944fa9e4066Sahrens 	minor_t minor = getminor(dev);
945fa9e4066Sahrens 	zvol_state_t *zv;
946681d9761SEric Taylor 	int error = 0;
947fa9e4066Sahrens 
948c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
949fa9e4066Sahrens 
950c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
951fa9e4066Sahrens 	if (zv == NULL) {
952c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
953be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
954fa9e4066Sahrens 	}
955fa9e4066Sahrens 
956c7f714e2SEric Taylor 	if (zv->zv_flags & ZVOL_EXCL) {
957c7f714e2SEric Taylor 		ASSERT(zv->zv_total_opens == 1);
958c7f714e2SEric Taylor 		zv->zv_flags &= ~ZVOL_EXCL;
959fa9e4066Sahrens 	}
960fa9e4066Sahrens 
961fa9e4066Sahrens 	/*
962fa9e4066Sahrens 	 * If the open count is zero, this is a spurious close.
963fa9e4066Sahrens 	 * That indicates a bug in the kernel / DDI framework.
964fa9e4066Sahrens 	 */
965fa9e4066Sahrens 	ASSERT(zv->zv_open_count[otyp] != 0);
966fa9e4066Sahrens 	ASSERT(zv->zv_total_opens != 0);
967fa9e4066Sahrens 
968fa9e4066Sahrens 	/*
969fa9e4066Sahrens 	 * You may get multiple opens, but only one close.
970fa9e4066Sahrens 	 */
971fa9e4066Sahrens 	zv->zv_open_count[otyp]--;
972fa9e4066Sahrens 	zv->zv_total_opens--;
973fa9e4066Sahrens 
974681d9761SEric Taylor 	if (zv->zv_total_opens == 0)
975681d9761SEric Taylor 		zvol_last_close(zv);
976fa9e4066Sahrens 
977c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
978681d9761SEric Taylor 	return (error);
979fa9e4066Sahrens }
980fa9e4066Sahrens 
981cab3a55eSPrakash Surya /* ARGSUSED */
982feb08c6bSbillm static void
983b24ab676SJeff Bonwick zvol_get_done(zgd_t *zgd, int error)
98467bd71c6Sperrin {
985b24ab676SJeff Bonwick 	if (zgd->zgd_db)
986b24ab676SJeff Bonwick 		dmu_buf_rele(zgd->zgd_db, zgd);
987b24ab676SJeff Bonwick 
98879315247SMatthew Ahrens 	rangelock_exit(zgd->zgd_lr);
989b24ab676SJeff Bonwick 
99067bd71c6Sperrin 	kmem_free(zgd, sizeof (zgd_t));
99167bd71c6Sperrin }
99267bd71c6Sperrin 
99367bd71c6Sperrin /*
99467bd71c6Sperrin  * Get data to generate a TX_WRITE intent log record.
99567bd71c6Sperrin  */
996feb08c6bSbillm static int
9971271e4b1SPrakash Surya zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
99867bd71c6Sperrin {
99967bd71c6Sperrin 	zvol_state_t *zv = arg;
1000b24ab676SJeff Bonwick 	uint64_t offset = lr->lr_offset;
1001b24ab676SJeff Bonwick 	uint64_t size = lr->lr_length;	/* length of user data */
100267bd71c6Sperrin 	dmu_buf_t *db;
100367bd71c6Sperrin 	zgd_t *zgd;
100467bd71c6Sperrin 	int error;
100567bd71c6Sperrin 
10061271e4b1SPrakash Surya 	ASSERT3P(lwb, !=, NULL);
10071271e4b1SPrakash Surya 	ASSERT3P(zio, !=, NULL);
10081271e4b1SPrakash Surya 	ASSERT3U(size, !=, 0);
1009b24ab676SJeff Bonwick 
1010b24ab676SJeff Bonwick 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
10111271e4b1SPrakash Surya 	zgd->zgd_lwb = lwb;
1012feb08c6bSbillm 
1013c2e6a7d6Sperrin 	/*
1014c2e6a7d6Sperrin 	 * Write records come in two flavors: immediate and indirect.
1015c2e6a7d6Sperrin 	 * For small writes it's cheaper to store the data with the
1016c2e6a7d6Sperrin 	 * log record (immediate); for large writes it's cheaper to
1017c2e6a7d6Sperrin 	 * sync the data and get a pointer to it (indirect) so that
1018c2e6a7d6Sperrin 	 * we don't have to write the data twice.
1019c2e6a7d6Sperrin 	 */
102042b14111SLOLi 	if (buf != NULL) { /* immediate write */
102179315247SMatthew Ahrens 		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
102242b14111SLOLi 		    RL_READER);
10238dfe5547SRichard Yao 		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
1024b24ab676SJeff Bonwick 		    DMU_READ_NO_PREFETCH);
102542b14111SLOLi 	} else { /* indirect write */
102642b14111SLOLi 		/*
102742b14111SLOLi 		 * Have to lock the whole block to ensure when it's written out
102842b14111SLOLi 		 * and its checksum is being calculated that no one can change
102942b14111SLOLi 		 * the data.  Unlike zfs_get_data(), we need not re-check
103042b14111SLOLi 		 * blocksize after we get the lock because it cannot be changed.
103142b14111SLOLi 		 */
1032b24ab676SJeff Bonwick 		size = zv->zv_volblocksize;
1033b24ab676SJeff Bonwick 		offset = P2ALIGN(offset, size);
103479315247SMatthew Ahrens 		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
103542b14111SLOLi 		    RL_READER);
10368dfe5547SRichard Yao 		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
103747cb52daSJeff Bonwick 		    DMU_READ_NO_PREFETCH);
1038b24ab676SJeff Bonwick 		if (error == 0) {
1039b7edcb94SMatthew Ahrens 			blkptr_t *bp = &lr->lr_blkptr;
104080901aeaSGeorge Wilson 
1041b24ab676SJeff Bonwick 			zgd->zgd_db = db;
1042b24ab676SJeff Bonwick 			zgd->zgd_bp = bp;
104367bd71c6Sperrin 
1044b24ab676SJeff Bonwick 			ASSERT(db->db_offset == offset);
1045b24ab676SJeff Bonwick 			ASSERT(db->db_size == size);
104667bd71c6Sperrin 
1047b24ab676SJeff Bonwick 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1048b24ab676SJeff Bonwick 			    zvol_get_done, zgd);
1049975c32a0SNeil Perrin 
1050b24ab676SJeff Bonwick 			if (error == 0)
1051b24ab676SJeff Bonwick 				return (0);
1052b24ab676SJeff Bonwick 		}
1053975c32a0SNeil Perrin 	}
1054975c32a0SNeil Perrin 
1055b24ab676SJeff Bonwick 	zvol_get_done(zgd, error);
1056b24ab676SJeff Bonwick 
105767bd71c6Sperrin 	return (error);
105867bd71c6Sperrin }
105967bd71c6Sperrin 
1060a24e15ceSperrin /*
1061a24e15ceSperrin  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
106222ac5be4Sperrin  *
106322ac5be4Sperrin  * We store data in the log buffers if it's small enough.
106467bd71c6Sperrin  * Otherwise we will later flush the data out via dmu_sync().
106522ac5be4Sperrin  */
106667bd71c6Sperrin ssize_t zvol_immediate_write_sz = 32768;
106722ac5be4Sperrin 
1068feb08c6bSbillm static void
1069510b6c0eSNeil Perrin zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1070510b6c0eSNeil Perrin     boolean_t sync)
107122ac5be4Sperrin {
1072feb08c6bSbillm 	uint32_t blocksize = zv->zv_volblocksize;
10731209a471SNeil Perrin 	zilog_t *zilog = zv->zv_zilog;
1074c5ee4681SAlexander Motin 	itx_wr_state_t write_state;
1075510b6c0eSNeil Perrin 
1076b24ab676SJeff Bonwick 	if (zil_replaying(zilog, tx))
10771209a471SNeil Perrin 		return;
10781209a471SNeil Perrin 
1079c5ee4681SAlexander Motin 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1080c5ee4681SAlexander Motin 		write_state = WR_INDIRECT;
1081c5ee4681SAlexander Motin 	else if (!spa_has_slogs(zilog->zl_spa) &&
1082c5ee4681SAlexander Motin 	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1083c5ee4681SAlexander Motin 		write_state = WR_INDIRECT;
1084c5ee4681SAlexander Motin 	else if (sync)
1085c5ee4681SAlexander Motin 		write_state = WR_COPIED;
1086c5ee4681SAlexander Motin 	else
1087c5ee4681SAlexander Motin 		write_state = WR_NEED_COPY;
1088feb08c6bSbillm 
1089510b6c0eSNeil Perrin 	while (resid) {
1090510b6c0eSNeil Perrin 		itx_t *itx;
1091510b6c0eSNeil Perrin 		lr_write_t *lr;
1092c5ee4681SAlexander Motin 		itx_wr_state_t wr_state = write_state;
1093c5ee4681SAlexander Motin 		ssize_t len = resid;
1094c5ee4681SAlexander Motin 
1095c5ee4681SAlexander Motin 		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1096c5ee4681SAlexander Motin 			wr_state = WR_NEED_COPY;
1097c5ee4681SAlexander Motin 		else if (wr_state == WR_INDIRECT)
1098c5ee4681SAlexander Motin 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1099510b6c0eSNeil Perrin 
1100510b6c0eSNeil Perrin 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1101c5ee4681SAlexander Motin 		    (wr_state == WR_COPIED ? len : 0));
1102feb08c6bSbillm 		lr = (lr_write_t *)&itx->itx_lr;
11038dfe5547SRichard Yao 		if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
11048dfe5547SRichard Yao 		    off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1105b24ab676SJeff Bonwick 			zil_itx_destroy(itx);
1106510b6c0eSNeil Perrin 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1107510b6c0eSNeil Perrin 			lr = (lr_write_t *)&itx->itx_lr;
1108c5ee4681SAlexander Motin 			wr_state = WR_NEED_COPY;
1109510b6c0eSNeil Perrin 		}
1110510b6c0eSNeil Perrin 
1111c5ee4681SAlexander Motin 		itx->itx_wr_state = wr_state;
1112feb08c6bSbillm 		lr->lr_foid = ZVOL_OBJ;
1113feb08c6bSbillm 		lr->lr_offset = off;
1114510b6c0eSNeil Perrin 		lr->lr_length = len;
1115b24ab676SJeff Bonwick 		lr->lr_blkoff = 0;
1116feb08c6bSbillm 		BP_ZERO(&lr->lr_blkptr);
1117feb08c6bSbillm 
1118510b6c0eSNeil Perrin 		itx->itx_private = zv;
1119510b6c0eSNeil Perrin 		itx->itx_sync = sync;
1120510b6c0eSNeil Perrin 
11215002558fSNeil Perrin 		zil_itx_assign(zilog, itx, tx);
1122510b6c0eSNeil Perrin 
1123510b6c0eSNeil Perrin 		off += len;
1124510b6c0eSNeil Perrin 		resid -= len;
112522ac5be4Sperrin 	}
112622ac5be4Sperrin }
112722ac5be4Sperrin 
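/*
 * Issue physical I/O for the dump path directly against a vdev,
 * recursing through mirror/replacing/spare children and handing
 * raidz off to vdev_raidz_physio().  The DMU is bypassed entirely.
 */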
112888b7b0f2SMatthew Ahrens static int
1129810e43b2SBill Pijewski zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1130810e43b2SBill Pijewski     uint64_t size, boolean_t doread, boolean_t isdump)
1131e7cbe64fSgw {
1132e7cbe64fSgw 	vdev_disk_t *dvd;
1133e7cbe64fSgw 	int c;
1134e7cbe64fSgw 	int numerrors = 0;
1135e7cbe64fSgw 
1136810e43b2SBill Pijewski 	if (vd->vdev_ops == &vdev_mirror_ops ||
1137810e43b2SBill Pijewski 	    vd->vdev_ops == &vdev_replacing_ops ||
1138810e43b2SBill Pijewski 	    vd->vdev_ops == &vdev_spare_ops) {
1139810e43b2SBill Pijewski 		for (c = 0; c < vd->vdev_children; c++) {
1140810e43b2SBill Pijewski 			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1141810e43b2SBill Pijewski 			    addr, offset, origoffset, size, doread, isdump);
1142810e43b2SBill Pijewski 			if (err != 0) {
1143810e43b2SBill Pijewski 				numerrors++;
1144810e43b2SBill Pijewski 			} else if (doread) {
1145810e43b2SBill Pijewski 				break;
1146810e43b2SBill Pijewski 			}
1147e7cbe64fSgw 		}
1148e7cbe64fSgw 	}
1149e7cbe64fSgw 
1150810e43b2SBill Pijewski 	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1151e7cbe64fSgw 		return (numerrors < vd->vdev_children ? 0 : EIO);
1152e7cbe64fSgw 
1153dc0bb255SEric Taylor 	if (doread && !vdev_readable(vd))
1154be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1155dc0bb255SEric Taylor 	else if (!doread && !vdev_writeable(vd))
1156be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1157e7cbe64fSgw 
1158810e43b2SBill Pijewski 	if (vd->vdev_ops == &vdev_raidz_ops) {
1159810e43b2SBill Pijewski 		return (vdev_raidz_physio(vd,
1160810e43b2SBill Pijewski 		    addr, size, offset, origoffset, doread, isdump));
1161810e43b2SBill Pijewski 	}
1162810e43b2SBill Pijewski 
1163e7cbe64fSgw 	offset += VDEV_LABEL_START_SIZE;
1164e7cbe64fSgw 
1165e7cbe64fSgw 	if (ddi_in_panic() || isdump) {
116688b7b0f2SMatthew Ahrens 		ASSERT(!doread);
116788b7b0f2SMatthew Ahrens 		if (doread)
1168be6fd75aSMatthew Ahrens 			return (SET_ERROR(EIO));
1169810e43b2SBill Pijewski 		dvd = vd->vdev_tsd;
1170810e43b2SBill Pijewski 		ASSERT3P(dvd, !=, NULL);
1171e7cbe64fSgw 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1172e7cbe64fSgw 		    lbtodb(size)));
1173e7cbe64fSgw 	} else {
1174810e43b2SBill Pijewski 		dvd = vd->vdev_tsd;
1175810e43b2SBill Pijewski 		ASSERT3P(dvd, !=, NULL);
1176810e43b2SBill Pijewski 		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1177810e43b2SBill Pijewski 		    offset, doread ? B_READ : B_WRITE));
1178e7cbe64fSgw 	}
1179e7cbe64fSgw }
1180e7cbe64fSgw 
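/*
 * Translate a logical volume offset into a physical location using the
 * precomputed extent list, then perform the I/O via zvol_dumpio_vdev().
 */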
118188b7b0f2SMatthew Ahrens static int
118288b7b0f2SMatthew Ahrens zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
118388b7b0f2SMatthew Ahrens     boolean_t doread, boolean_t isdump)
1184e7cbe64fSgw {
1185e7cbe64fSgw 	vdev_t *vd;
1186e7cbe64fSgw 	int error;
118788b7b0f2SMatthew Ahrens 	zvol_extent_t *ze;
1188e7cbe64fSgw 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1189e7cbe64fSgw 
119088b7b0f2SMatthew Ahrens 	/* Must be sector aligned, and must not straddle a block boundary. */
119188b7b0f2SMatthew Ahrens 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
119288b7b0f2SMatthew Ahrens 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1193be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
119488b7b0f2SMatthew Ahrens 	}
119588b7b0f2SMatthew Ahrens 	ASSERT(size <= zv->zv_volblocksize);
1196e7cbe64fSgw 
119788b7b0f2SMatthew Ahrens 	/* Locate the extent this belongs to */
119888b7b0f2SMatthew Ahrens 	ze = list_head(&zv->zv_extents);
119988b7b0f2SMatthew Ahrens 	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
120088b7b0f2SMatthew Ahrens 		offset -= ze->ze_nblks * zv->zv_volblocksize;
120188b7b0f2SMatthew Ahrens 		ze = list_next(&zv->zv_extents, ze);
120288b7b0f2SMatthew Ahrens 	}
120324cc0e1cSGeorge Wilson 
12043b2aab18SMatthew Ahrens 	if (ze == NULL)
1205be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
12063b2aab18SMatthew Ahrens 
120724cc0e1cSGeorge Wilson 	if (!ddi_in_panic())
120824cc0e1cSGeorge Wilson 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
120924cc0e1cSGeorge Wilson 
121088b7b0f2SMatthew Ahrens 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
121188b7b0f2SMatthew Ahrens 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1212810e43b2SBill Pijewski 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1213810e43b2SBill Pijewski 	    size, doread, isdump);
121424cc0e1cSGeorge Wilson 
121524cc0e1cSGeorge Wilson 	if (!ddi_in_panic())
121624cc0e1cSGeorge Wilson 		spa_config_exit(spa, SCL_STATE, FTAG);
121724cc0e1cSGeorge Wilson 
1218e7cbe64fSgw 	return (error);
1219e7cbe64fSgw }
1220e7cbe64fSgw 
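/*
 * Block device strategy entry point.  Validate the minor and the request
 * bounds, then either go through the dump path for dumpified zvols or
 * issue DMU reads/writes under the zvol's range lock, logging writes to
 * the ZIL and committing it when the request is synchronous.
 */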
1221fa9e4066Sahrens int
1222fa9e4066Sahrens zvol_strategy(buf_t *bp)
1223fa9e4066Sahrens {
1224c99e4bdcSChris Kirby 	zfs_soft_state_t *zs = NULL;
1225c99e4bdcSChris Kirby 	zvol_state_t *zv;
1226fa9e4066Sahrens 	uint64_t off, volsize;
122788b7b0f2SMatthew Ahrens 	size_t resid;
1228fa9e4066Sahrens 	char *addr;
122922ac5be4Sperrin 	objset_t *os;
1230fa9e4066Sahrens 	int error = 0;
123188b7b0f2SMatthew Ahrens 	boolean_t doread = bp->b_flags & B_READ;
1232810e43b2SBill Pijewski 	boolean_t is_dumpified;
1233510b6c0eSNeil Perrin 	boolean_t sync;
1234fa9e4066Sahrens 
1235c99e4bdcSChris Kirby 	if (getminor(bp->b_edev) == 0) {
1236be6fd75aSMatthew Ahrens 		error = SET_ERROR(EINVAL);
1237c99e4bdcSChris Kirby 	} else {
1238c99e4bdcSChris Kirby 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1239c99e4bdcSChris Kirby 		if (zs == NULL)
1240be6fd75aSMatthew Ahrens 			error = SET_ERROR(ENXIO);
1241c99e4bdcSChris Kirby 		else if (zs->zss_type != ZSST_ZVOL)
1242be6fd75aSMatthew Ahrens 			error = SET_ERROR(EINVAL);
1243fa9e4066Sahrens 	}
1244fa9e4066Sahrens 
1245c99e4bdcSChris Kirby 	if (error) {
1246c99e4bdcSChris Kirby 		bioerror(bp, error);
1247fa9e4066Sahrens 		biodone(bp);
1248fa9e4066Sahrens 		return (0);
1249fa9e4066Sahrens 	}
1250fa9e4066Sahrens 
1251c99e4bdcSChris Kirby 	zv = zs->zss_data;
1252c99e4bdcSChris Kirby 
1253681d9761SEric Taylor 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1254fa9e4066Sahrens 		bioerror(bp, EROFS);
1255fa9e4066Sahrens 		biodone(bp);
1256fa9e4066Sahrens 		return (0);
1257fa9e4066Sahrens 	}
1258fa9e4066Sahrens 
1259fa9e4066Sahrens 	off = ldbtob(bp->b_blkno);
1260fa9e4066Sahrens 	volsize = zv->zv_volsize;
1261fa9e4066Sahrens 
126222ac5be4Sperrin 	os = zv->zv_objset;
126322ac5be4Sperrin 	ASSERT(os != NULL);
1264fa9e4066Sahrens 
1265fa9e4066Sahrens 	bp_mapin(bp);
1266fa9e4066Sahrens 	addr = bp->b_un.b_addr;
1267fa9e4066Sahrens 	resid = bp->b_bcount;
1268fa9e4066Sahrens 
126988b7b0f2SMatthew Ahrens 	if (resid > 0 && (off < 0 || off >= volsize)) {
127088b7b0f2SMatthew Ahrens 		bioerror(bp, EIO);
127188b7b0f2SMatthew Ahrens 		biodone(bp);
127288b7b0f2SMatthew Ahrens 		return (0);
127388b7b0f2SMatthew Ahrens 	}
127473ec3d9cSgw 
1275810e43b2SBill Pijewski 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
127655da60b9SMark J Musante 	sync = ((!(bp->b_flags & B_ASYNC) &&
127755da60b9SMark J Musante 	    !(zv->zv_flags & ZVOL_WCE)) ||
127855da60b9SMark J Musante 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1279810e43b2SBill Pijewski 	    !doread && !is_dumpified;
1280510b6c0eSNeil Perrin 
1281c3377ee9SJohn Levon 	smt_begin_unsafe();
1282455e370cSJohn Levon 
1283a24e15ceSperrin 	/*
1284a24e15ceSperrin 	 * There must be no buffer changes when doing a dmu_sync() because
1285a24e15ceSperrin 	 * we can't change the data whilst calculating the checksum.
1286a24e15ceSperrin 	 */
128779315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock, off, resid,
128888b7b0f2SMatthew Ahrens 	    doread ? RL_READER : RL_WRITER);
1289fa9e4066Sahrens 
1290e7cbe64fSgw 	while (resid != 0 && off < volsize) {
129188b7b0f2SMatthew Ahrens 		size_t size = MIN(resid, zvol_maxphys);
1292810e43b2SBill Pijewski 		if (is_dumpified) {
1293e7cbe64fSgw 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
129488b7b0f2SMatthew Ahrens 			error = zvol_dumpio(zv, addr, off, size,
129588b7b0f2SMatthew Ahrens 			    doread, B_FALSE);
129688b7b0f2SMatthew Ahrens 		} else if (doread) {
12977bfdf011SNeil Perrin 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
12987bfdf011SNeil Perrin 			    DMU_READ_PREFETCH);
1299fa9e4066Sahrens 		} else {
130022ac5be4Sperrin 			dmu_tx_t *tx = dmu_tx_create(os);
1301fa9e4066Sahrens 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1302fa9e4066Sahrens 			error = dmu_tx_assign(tx, TXG_WAIT);
1303fa9e4066Sahrens 			if (error) {
1304fa9e4066Sahrens 				dmu_tx_abort(tx);
1305fa9e4066Sahrens 			} else {
130622ac5be4Sperrin 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1307510b6c0eSNeil Perrin 				zvol_log_write(zv, tx, off, size, sync);
1308fa9e4066Sahrens 				dmu_tx_commit(tx);
1309fa9e4066Sahrens 			}
1310fa9e4066Sahrens 		}
1311b87f3af3Sperrin 		if (error) {
1312b87f3af3Sperrin 			/* convert checksum errors into IO errors */
1313b87f3af3Sperrin 			if (error == ECKSUM)
1314be6fd75aSMatthew Ahrens 				error = SET_ERROR(EIO);
1315fa9e4066Sahrens 			break;
1316b87f3af3Sperrin 		}
1317fa9e4066Sahrens 		off += size;
1318fa9e4066Sahrens 		addr += size;
1319fa9e4066Sahrens 		resid -= size;
1320fa9e4066Sahrens 	}
132179315247SMatthew Ahrens 	rangelock_exit(lr);
1322fa9e4066Sahrens 
1323fa9e4066Sahrens 	if ((bp->b_resid = resid) == bp->b_bcount)
1324fa9e4066Sahrens 		bioerror(bp, off > volsize ? EINVAL : error);
1325fa9e4066Sahrens 
1326510b6c0eSNeil Perrin 	if (sync)
13275002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1328feb08c6bSbillm 	biodone(bp);
132922ac5be4Sperrin 
1330c3377ee9SJohn Levon 	smt_end_unsafe();
1331455e370cSJohn Levon 
1332fa9e4066Sahrens 	return (0);
1333fa9e4066Sahrens }
1334fa9e4066Sahrens 
133567bd71c6Sperrin /*
133667bd71c6Sperrin  * Set the buffer count to the zvol maximum transfer.
133767bd71c6Sperrin  * Using our own routine instead of the default minphys()
133867bd71c6Sperrin  * means that for larger writes we write bigger buffers on X86
133967bd71c6Sperrin  * (128K instead of 56K) and flush the disk write cache less often:
134067bd71c6Sperrin  * every zvol_maxphys (currently 1MB) instead of every minphys
134167bd71c6Sperrin  * (currently 56K on X86 and 128K on sparc).
134267bd71c6Sperrin  */
134367bd71c6Sperrin void
134467bd71c6Sperrin zvol_minphys(struct buf *bp)
134567bd71c6Sperrin {
134667bd71c6Sperrin 	if (bp->b_bcount > zvol_maxphys)
134767bd71c6Sperrin 		bp->b_bcount = zvol_maxphys;
134867bd71c6Sperrin }
134967bd71c6Sperrin 
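/*
 * Crash dump entry point: write 'nblocks' blocks starting at 'blkno'
 * directly to the preallocated dump extents.  Only valid once the zvol
 * has been dumpified.
 */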
1350e7cbe64fSgw int
1351e7cbe64fSgw zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1352e7cbe64fSgw {
1353e7cbe64fSgw 	minor_t minor = getminor(dev);
1354e7cbe64fSgw 	zvol_state_t *zv;
1355e7cbe64fSgw 	int error = 0;
1356e7cbe64fSgw 	uint64_t size;
1357e7cbe64fSgw 	uint64_t boff;
1358e7cbe64fSgw 	uint64_t resid;
1359e7cbe64fSgw 
1360c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1361e7cbe64fSgw 	if (zv == NULL)
1362be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1363e7cbe64fSgw 
13643b2aab18SMatthew Ahrens 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1365be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
13663b2aab18SMatthew Ahrens 
1367e7cbe64fSgw 	boff = ldbtob(blkno);
1368e7cbe64fSgw 	resid = ldbtob(nblocks);
136988b7b0f2SMatthew Ahrens 
137088b7b0f2SMatthew Ahrens 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
137188b7b0f2SMatthew Ahrens 
1372e7cbe64fSgw 	while (resid) {
1373e7cbe64fSgw 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
137488b7b0f2SMatthew Ahrens 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1375e7cbe64fSgw 		if (error)
1376e7cbe64fSgw 			break;
1377e7cbe64fSgw 		boff += size;
1378e7cbe64fSgw 		addr += size;
1379e7cbe64fSgw 		resid -= size;
1380e7cbe64fSgw 	}
1381e7cbe64fSgw 
1382e7cbe64fSgw 	return (error);
1383e7cbe64fSgw }
1384e7cbe64fSgw 
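/*
 * Character device read entry point.  Dumpified zvols are serviced via
 * physio() through zvol_strategy(); otherwise the uio is satisfied with
 * DMU reads issued under a reader range lock.
 */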
1385fa9e4066Sahrens /*ARGSUSED*/
1386fa9e4066Sahrens int
1387feb08c6bSbillm zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1388fa9e4066Sahrens {
1389c7ca1008Sgw 	minor_t minor = getminor(dev);
1390c7ca1008Sgw 	zvol_state_t *zv;
139173ec3d9cSgw 	uint64_t volsize;
1392feb08c6bSbillm 	int error = 0;
1393fa9e4066Sahrens 
1394c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1395c7ca1008Sgw 	if (zv == NULL)
1396be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1397c7ca1008Sgw 
139873ec3d9cSgw 	volsize = zv->zv_volsize;
139973ec3d9cSgw 	if (uio->uio_resid > 0 &&
140073ec3d9cSgw 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1401be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
140273ec3d9cSgw 
140388b7b0f2SMatthew Ahrens 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
140488b7b0f2SMatthew Ahrens 		error = physio(zvol_strategy, NULL, dev, B_READ,
140588b7b0f2SMatthew Ahrens 		    zvol_minphys, uio);
140688b7b0f2SMatthew Ahrens 		return (error);
140788b7b0f2SMatthew Ahrens 	}
140888b7b0f2SMatthew Ahrens 
1409c3377ee9SJohn Levon 	smt_begin_unsafe();
1410455e370cSJohn Levon 
141179315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
141279315247SMatthew Ahrens 	    uio->uio_loffset, uio->uio_resid, RL_READER);
141373ec3d9cSgw 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1414feb08c6bSbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1415fa9e4066Sahrens 
141673ec3d9cSgw 		/* don't read past the end */
141773ec3d9cSgw 		if (bytes > volsize - uio->uio_loffset)
141873ec3d9cSgw 			bytes = volsize - uio->uio_loffset;
141973ec3d9cSgw 
1420feb08c6bSbillm 		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1421b87f3af3Sperrin 		if (error) {
1422b87f3af3Sperrin 			/* convert checksum errors into IO errors */
1423b87f3af3Sperrin 			if (error == ECKSUM)
1424be6fd75aSMatthew Ahrens 				error = SET_ERROR(EIO);
1425feb08c6bSbillm 			break;
1426b87f3af3Sperrin 		}
1427feb08c6bSbillm 	}
142879315247SMatthew Ahrens 	rangelock_exit(lr);
142979315247SMatthew Ahrens 
1430c3377ee9SJohn Levon 	smt_end_unsafe();
1431455e370cSJohn Levon 
1432feb08c6bSbillm 	return (error);
1433fa9e4066Sahrens }
1434fa9e4066Sahrens 
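/*
 * Character device write entry point.  Dumpified zvols are serviced via
 * physio() through zvol_strategy(); otherwise each chunk is written in
 * its own transaction under a writer range lock and logged to the ZIL,
 * which is committed afterwards if the write is synchronous.
 */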
1435fa9e4066Sahrens /*ARGSUSED*/
1436fa9e4066Sahrens int
1437feb08c6bSbillm zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1438fa9e4066Sahrens {
1439c7ca1008Sgw 	minor_t minor = getminor(dev);
1440c7ca1008Sgw 	zvol_state_t *zv;
144173ec3d9cSgw 	uint64_t volsize;
1442feb08c6bSbillm 	int error = 0;
1443510b6c0eSNeil Perrin 	boolean_t sync;
1444feb08c6bSbillm 
1445c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1446c7ca1008Sgw 	if (zv == NULL)
1447be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1448c7ca1008Sgw 
144973ec3d9cSgw 	volsize = zv->zv_volsize;
145073ec3d9cSgw 	if (uio->uio_resid > 0 &&
145173ec3d9cSgw 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1452be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
145373ec3d9cSgw 
1454e7cbe64fSgw 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1455e7cbe64fSgw 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1456e7cbe64fSgw 		    zvol_minphys, uio);
1457e7cbe64fSgw 		return (error);
1458e7cbe64fSgw 	}
1459e7cbe64fSgw 
1460c3377ee9SJohn Levon 	smt_begin_unsafe();
1461455e370cSJohn Levon 
146255da60b9SMark J Musante 	sync = !(zv->zv_flags & ZVOL_WCE) ||
146355da60b9SMark J Musante 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1464510b6c0eSNeil Perrin 
146579315247SMatthew Ahrens 	locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
146679315247SMatthew Ahrens 	    uio->uio_loffset, uio->uio_resid, RL_WRITER);
146773ec3d9cSgw 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1468feb08c6bSbillm 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1469feb08c6bSbillm 		uint64_t off = uio->uio_loffset;
1470feb08c6bSbillm 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
147173ec3d9cSgw 
147273ec3d9cSgw 		if (bytes > volsize - off)	/* don't write past the end */
147373ec3d9cSgw 			bytes = volsize - off;
147473ec3d9cSgw 
1475feb08c6bSbillm 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1476feb08c6bSbillm 		error = dmu_tx_assign(tx, TXG_WAIT);
1477feb08c6bSbillm 		if (error) {
1478feb08c6bSbillm 			dmu_tx_abort(tx);
1479feb08c6bSbillm 			break;
1480feb08c6bSbillm 		}
14818dfe5547SRichard Yao 		error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx);
1482feb08c6bSbillm 		if (error == 0)
1483510b6c0eSNeil Perrin 			zvol_log_write(zv, tx, off, bytes, sync);
1484feb08c6bSbillm 		dmu_tx_commit(tx);
1485feb08c6bSbillm 
1486feb08c6bSbillm 		if (error)
1487feb08c6bSbillm 			break;
1488feb08c6bSbillm 	}
148979315247SMatthew Ahrens 	rangelock_exit(lr);
149079315247SMatthew Ahrens 
1491510b6c0eSNeil Perrin 	if (sync)
14925002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1493455e370cSJohn Levon 
1494c3377ee9SJohn Levon 	smt_end_unsafe();
1495455e370cSJohn Levon 
1496feb08c6bSbillm 	return (error);
1497fa9e4066Sahrens }
1498fa9e4066Sahrens 
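/*
 * Fabricate a minimal EFI label for the zvol: a GPT header at LBA 1 and
 * a single reserved partition entry at LBA 2 spanning the usable LBAs,
 * copying out only the portion the caller asked for.
 */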
1499c7f714e2SEric Taylor int
1500c7f714e2SEric Taylor zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1501c7f714e2SEric Taylor {
1502c7f714e2SEric Taylor 	struct uuid uuid = EFI_RESERVED;
1503c7f714e2SEric Taylor 	efi_gpe_t gpe = { 0 };
1504c7f714e2SEric Taylor 	uint32_t crc;
1505c7f714e2SEric Taylor 	dk_efi_t efi;
1506c7f714e2SEric Taylor 	int length;
1507c7f714e2SEric Taylor 	char *ptr;
1508c7f714e2SEric Taylor 
1509c7f714e2SEric Taylor 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1510be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFAULT));
1511c7f714e2SEric Taylor 	ptr = (char *)(uintptr_t)efi.dki_data_64;
1512c7f714e2SEric Taylor 	length = efi.dki_length;
1513c7f714e2SEric Taylor 	/*
1514c7f714e2SEric Taylor 	 * Some clients may attempt to request a PMBR for the
1515c7f714e2SEric Taylor 	 * zvol.  Currently this interface will return EINVAL to
1516c7f714e2SEric Taylor 	 * such requests.  These requests could be supported by
1517c7f714e2SEric Taylor 	 * adding a check for lba == 0 and consing up an appropriate
1518c7f714e2SEric Taylor 	 * PMBR.
1519c7f714e2SEric Taylor 	 */
1520c7f714e2SEric Taylor 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1521be6fd75aSMatthew Ahrens 		return (SET_ERROR(EINVAL));
1522c7f714e2SEric Taylor 
1523c7f714e2SEric Taylor 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1524c7f714e2SEric Taylor 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1525c7f714e2SEric Taylor 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1526c7f714e2SEric Taylor 
1527c7f714e2SEric Taylor 	if (efi.dki_lba == 1) {
1528c7f714e2SEric Taylor 		efi_gpt_t gpt = { 0 };
1529c7f714e2SEric Taylor 
1530c7f714e2SEric Taylor 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1531c7f714e2SEric Taylor 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1532fd797736SJohn Levon 		gpt.efi_gpt_HeaderSize = LE_32(EFI_HEADER_SIZE);
1533c7f714e2SEric Taylor 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1534c7f714e2SEric Taylor 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1535c7f714e2SEric Taylor 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1536c7f714e2SEric Taylor 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1537c7f714e2SEric Taylor 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1538c7f714e2SEric Taylor 		gpt.efi_gpt_SizeOfPartitionEntry =
1539c7f714e2SEric Taylor 		    LE_32(sizeof (efi_gpe_t));
1540c7f714e2SEric Taylor 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1541c7f714e2SEric Taylor 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1542fd797736SJohn Levon 		CRC32(crc, &gpt, EFI_HEADER_SIZE, -1U, crc32_table);
1543c7f714e2SEric Taylor 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1544c7f714e2SEric Taylor 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1545c7f714e2SEric Taylor 		    flag))
1546be6fd75aSMatthew Ahrens 			return (SET_ERROR(EFAULT));
1547c7f714e2SEric Taylor 		ptr += sizeof (gpt);
1548c7f714e2SEric Taylor 		length -= sizeof (gpt);
1549c7f714e2SEric Taylor 	}
1550c7f714e2SEric Taylor 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1551c7f714e2SEric Taylor 	    length), flag))
1552be6fd75aSMatthew Ahrens 		return (SET_ERROR(EFAULT));
1553c7f714e2SEric Taylor 	return (0);
1554c7f714e2SEric Taylor }
1555c7f714e2SEric Taylor 
15563fb517f7SJames Moore /*
15573fb517f7SJames Moore  * BEGIN entry points to allow external callers access to the volume.
15583fb517f7SJames Moore  */
15593fb517f7SJames Moore /*
15603fb517f7SJames Moore  * Return the volume parameters needed for access from an external caller.
15613fb517f7SJames Moore  * These values are invariant as long as the volume is held open.
15623fb517f7SJames Moore  */
15633fb517f7SJames Moore int
15643fb517f7SJames Moore zvol_get_volume_params(minor_t minor, uint64_t *blksize,
15653fb517f7SJames Moore     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
15668dfe5547SRichard Yao     void **rl_hdl, void **dnode_hdl)
15673fb517f7SJames Moore {
15683fb517f7SJames Moore 	zvol_state_t *zv;
15693fb517f7SJames Moore 
1570c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1571c99e4bdcSChris Kirby 	if (zv == NULL)
1572be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
15733fb517f7SJames Moore 	if (zv->zv_flags & ZVOL_DUMPIFIED)
1574be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
15753fb517f7SJames Moore 
15763fb517f7SJames Moore 	ASSERT(blksize && max_xfer_len && minor_hdl &&
15778dfe5547SRichard Yao 	    objset_hdl && zil_hdl && rl_hdl && dnode_hdl);
15783fb517f7SJames Moore 
15793fb517f7SJames Moore 	*blksize = zv->zv_volblocksize;
15803fb517f7SJames Moore 	*max_xfer_len = (uint64_t)zvol_maxphys;
15813fb517f7SJames Moore 	*minor_hdl = zv;
15823fb517f7SJames Moore 	*objset_hdl = zv->zv_objset;
15833fb517f7SJames Moore 	*zil_hdl = zv->zv_zilog;
158479315247SMatthew Ahrens 	*rl_hdl = &zv->zv_rangelock;
15858dfe5547SRichard Yao 	*dnode_hdl = zv->zv_dn;
15863fb517f7SJames Moore 	return (0);
15873fb517f7SJames Moore }
15883fb517f7SJames Moore 
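/*
 * Illustrative sketch (not part of this file): an external consumer
 * would typically fetch the invariant parameters once after holding the
 * volume open and cache the returned handles, e.g.:
 *
 *	uint64_t blksize, max_xfer;
 *	void *minor_hdl, *os_hdl, *zil_hdl, *rl_hdl, *dn_hdl;
 *
 *	if (zvol_get_volume_params(minor, &blksize, &max_xfer,
 *	    &minor_hdl, &os_hdl, &zil_hdl, &rl_hdl, &dn_hdl) != 0)
 *		return (SET_ERROR(ENXIO));
 *	cursize = zvol_get_volume_size(minor_hdl);
 *
 * The volume size and WCE setting can change while open, so callers
 * re-query them via zvol_get_volume_size() and zvol_get_volume_wce().
 */
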
15893fb517f7SJames Moore /*
15903fb517f7SJames Moore  * Return the current volume size to an external caller.
15913fb517f7SJames Moore  * The size can change while the volume is open.
15923fb517f7SJames Moore  */
15933fb517f7SJames Moore uint64_t
15943fb517f7SJames Moore zvol_get_volume_size(void *minor_hdl)
15953fb517f7SJames Moore {
15963fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
15973fb517f7SJames Moore 
15983fb517f7SJames Moore 	return (zv->zv_volsize);
15993fb517f7SJames Moore }
16003fb517f7SJames Moore 
16013fb517f7SJames Moore /*
16023fb517f7SJames Moore  * Return the current WCE setting to an external caller.
16033fb517f7SJames Moore  * The WCE setting can change while the volume is open.
16043fb517f7SJames Moore  */
16053fb517f7SJames Moore int
16063fb517f7SJames Moore zvol_get_volume_wce(void *minor_hdl)
16073fb517f7SJames Moore {
16083fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
16093fb517f7SJames Moore 
16103fb517f7SJames Moore 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
16113fb517f7SJames Moore }
16123fb517f7SJames Moore 
16133fb517f7SJames Moore /*
16143fb517f7SJames Moore  * Entry point for external callers to zvol_log_write
16153fb517f7SJames Moore  */
16163fb517f7SJames Moore void
16173fb517f7SJames Moore zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
16183fb517f7SJames Moore     boolean_t sync)
16193fb517f7SJames Moore {
16203fb517f7SJames Moore 	zvol_state_t *zv = minor_hdl;
16213fb517f7SJames Moore 
16223fb517f7SJames Moore 	zvol_log_write(zv, tx, off, resid, sync);
16233fb517f7SJames Moore }
16243fb517f7SJames Moore /*
16253fb517f7SJames Moore  * END entry points to allow external callers access to the volume.
16263fb517f7SJames Moore  */
16273fb517f7SJames Moore 
1628b77b9231SDan McDonald /*
1629b77b9231SDan McDonald  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1630b77b9231SDan McDonald  */
1631b77b9231SDan McDonald static void
1632b77b9231SDan McDonald zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1633b77b9231SDan McDonald     boolean_t sync)
1634b77b9231SDan McDonald {
1635b77b9231SDan McDonald 	itx_t *itx;
1636b77b9231SDan McDonald 	lr_truncate_t *lr;
1637b77b9231SDan McDonald 	zilog_t *zilog = zv->zv_zilog;
1638b77b9231SDan McDonald 
1639b77b9231SDan McDonald 	if (zil_replaying(zilog, tx))
1640b77b9231SDan McDonald 		return;
1641b77b9231SDan McDonald 
1642b77b9231SDan McDonald 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1643b77b9231SDan McDonald 	lr = (lr_truncate_t *)&itx->itx_lr;
1644b77b9231SDan McDonald 	lr->lr_foid = ZVOL_OBJ;
1645b77b9231SDan McDonald 	lr->lr_offset = off;
1646b77b9231SDan McDonald 	lr->lr_length = len;
1647b77b9231SDan McDonald 
1648b77b9231SDan McDonald 	itx->itx_sync = sync;
1649b77b9231SDan McDonald 	zil_itx_assign(zilog, itx, tx);
1650b77b9231SDan McDonald }
1651b77b9231SDan McDonald 
1652fa9e4066Sahrens /*
1653fa9e4066Sahrens  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1654b77b9231SDan McDonald  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1655fa9e4066Sahrens  */
1656fa9e4066Sahrens /*ARGSUSED*/
1657fa9e4066Sahrens int
1658fa9e4066Sahrens zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1659fa9e4066Sahrens {
1660fa9e4066Sahrens 	zvol_state_t *zv;
1661af2c4821Smaybee 	struct dk_callback *dkc;
1662efe44a03SJerry Jelinek 	int i, error = 0;
166379315247SMatthew Ahrens 	locked_range_t *lr;
1664fa9e4066Sahrens 
1665c99e4bdcSChris Kirby 	mutex_enter(&zfsdev_state_lock);
1666fa9e4066Sahrens 
1667c99e4bdcSChris Kirby 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1668fa9e4066Sahrens 
1669fa9e4066Sahrens 	if (zv == NULL) {
1670c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1671be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENXIO));
1672fa9e4066Sahrens 	}
1673701f66c4SEric Taylor 	ASSERT(zv->zv_total_opens > 0);
1674fa9e4066Sahrens 
1675fa9e4066Sahrens 	switch (cmd) {
1676fa9e4066Sahrens 
1677fa9e4066Sahrens 	case DKIOCINFO:
1678a0b60564SGeorge Wilson 	{
1679a0b60564SGeorge Wilson 		struct dk_cinfo dki;
1680a0b60564SGeorge Wilson 
1681af2c4821Smaybee 		bzero(&dki, sizeof (dki));
1682af2c4821Smaybee 		(void) strcpy(dki.dki_cname, "zvol");
1683af2c4821Smaybee 		(void) strcpy(dki.dki_dname, "zvol");
1684af2c4821Smaybee 		dki.dki_ctype = DKC_UNKNOWN;
16853adc9019SEric Taylor 		dki.dki_unit = getminor(dev);
1686b5152584SMatthew Ahrens 		dki.dki_maxtransfer =
1687b5152584SMatthew Ahrens 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1688c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1689af2c4821Smaybee 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1690be6fd75aSMatthew Ahrens 			error = SET_ERROR(EFAULT);
1691fa9e4066Sahrens 		return (error);
1692a0b60564SGeorge Wilson 	}
1693fa9e4066Sahrens 
1694fa9e4066Sahrens 	case DKIOCGMEDIAINFO:
1695a0b60564SGeorge Wilson 	{
1696a0b60564SGeorge Wilson 		struct dk_minfo dkm;
1697a0b60564SGeorge Wilson 
1698fa9e4066Sahrens 		bzero(&dkm, sizeof (dkm));
1699fa9e4066Sahrens 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1700fa9e4066Sahrens 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1701fa9e4066Sahrens 		dkm.dki_media_type = DK_UNKNOWN;
1702c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1703fa9e4066Sahrens 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1704be6fd75aSMatthew Ahrens 			error = SET_ERROR(EFAULT);
1705fa9e4066Sahrens 		return (error);
1706a0b60564SGeorge Wilson 	}
1707a0b60564SGeorge Wilson 
1708a0b60564SGeorge Wilson 	case DKIOCGMEDIAINFOEXT:
1709a0b60564SGeorge Wilson 	{
1710a0b60564SGeorge Wilson 		struct dk_minfo_ext dkmext;
1711a0b60564SGeorge Wilson 
1712a0b60564SGeorge Wilson 		bzero(&dkmext, sizeof (dkmext));
1713a0b60564SGeorge Wilson 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1714a0b60564SGeorge Wilson 		dkmext.dki_pbsize = zv->zv_volblocksize;
1715a0b60564SGeorge Wilson 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1716a0b60564SGeorge Wilson 		dkmext.dki_media_type = DK_UNKNOWN;
1717a0b60564SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1718a0b60564SGeorge Wilson 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1719a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1720a0b60564SGeorge Wilson 		return (error);
1721a0b60564SGeorge Wilson 	}
1722fa9e4066Sahrens 
1723fa9e4066Sahrens 	case DKIOCGETEFI:
1724a0b60564SGeorge Wilson 	{
1725a0b60564SGeorge Wilson 		uint64_t vs = zv->zv_volsize;
1726a0b60564SGeorge Wilson 		uint8_t bs = zv->zv_min_bs;
1727fa9e4066Sahrens 
1728a0b60564SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1729a0b60564SGeorge Wilson 		error = zvol_getefi((void *)arg, flag, vs, bs);
1730a0b60564SGeorge Wilson 		return (error);
1731a0b60564SGeorge Wilson 	}
1732fa9e4066Sahrens 
1733feb08c6bSbillm 	case DKIOCFLUSHWRITECACHE:
1734af2c4821Smaybee 		dkc = (struct dk_callback *)arg;
1735c99e4bdcSChris Kirby 		mutex_exit(&zfsdev_state_lock);
1736455e370cSJohn Levon 
1737c3377ee9SJohn Levon 		smt_begin_unsafe();
1738455e370cSJohn Levon 
17395002558fSNeil Perrin 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1740af2c4821Smaybee 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1741af2c4821Smaybee 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1742af2c4821Smaybee 			error = 0;
1743af2c4821Smaybee 		}
1744455e370cSJohn Levon 
1745c3377ee9SJohn Levon 		smt_end_unsafe();
1746455e370cSJohn Levon 
1747701f66c4SEric Taylor 		return (error);
1748701f66c4SEric Taylor 
1749701f66c4SEric Taylor 	case DKIOCGETWCE:
1750a0b60564SGeorge Wilson 	{
1751a0b60564SGeorge Wilson 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1752a0b60564SGeorge Wilson 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1753a0b60564SGeorge Wilson 		    flag))
1754a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1755a0b60564SGeorge Wilson 		break;
1756a0b60564SGeorge Wilson 	}
1757a0b60564SGeorge Wilson 	case DKIOCSETWCE:
1758a0b60564SGeorge Wilson 	{
1759a0b60564SGeorge Wilson 		int wce;
1760a0b60564SGeorge Wilson 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1761a0b60564SGeorge Wilson 		    flag)) {
1762a0b60564SGeorge Wilson 			error = SET_ERROR(EFAULT);
1763701f66c4SEric Taylor 			break;
1764701f66c4SEric Taylor 		}
1765a0b60564SGeorge Wilson 		if (wce) {
1766a0b60564SGeorge Wilson 			zv->zv_flags |= ZVOL_WCE;
1767a0b60564SGeorge Wilson 			mutex_exit(&zfsdev_state_lock);
1768a0b60564SGeorge Wilson 		} else {
1769a0b60564SGeorge Wilson 			zv->zv_flags &= ~ZVOL_WCE;
1770a0b60564SGeorge Wilson 			mutex_exit(&zfsdev_state_lock);
1771c3377ee9SJohn Levon 			smt_begin_unsafe();
1772a0b60564SGeorge Wilson 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1773c3377ee9SJohn Levon 			smt_end_unsafe();
1774701f66c4SEric Taylor 		}
1775a0b60564SGeorge Wilson 		return (0);
1776a0b60564SGeorge Wilson 	}
1777feb08c6bSbillm 
1778b6130eadSmaybee 	case DKIOCGGEOM:
1779b6130eadSmaybee 	case DKIOCGVTOC:
1780e7cbe64fSgw 		/*
1781e7cbe64fSgw 		 * commands using these (like prtvtoc) expect ENOTSUP
1782e7cbe64fSgw 		 * since we're emulating an EFI label
1783e7cbe64fSgw 		 */
1784be6fd75aSMatthew Ahrens 		error = SET_ERROR(ENOTSUP);
1785b6130eadSmaybee 		break;
1786b6130eadSmaybee 
1787e7cbe64fSgw 	case DKIOCDUMPINIT:
178879315247SMatthew Ahrens 		lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1789e7cbe64fSgw 		    RL_WRITER);
1790e7cbe64fSgw 		error = zvol_dumpify(zv);
179179315247SMatthew Ahrens 		rangelock_exit(lr);
1792e7cbe64fSgw 		break;
1793e7cbe64fSgw 
1794e7cbe64fSgw 	case DKIOCDUMPFINI:
179506d5ae10SEric Taylor 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
179606d5ae10SEric Taylor 			break;
179779315247SMatthew Ahrens 		lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1798e7cbe64fSgw 		    RL_WRITER);
1799e7cbe64fSgw 		error = zvol_dump_fini(zv);
180079315247SMatthew Ahrens 		rangelock_exit(lr);
1801e7cbe64fSgw 		break;
1802e7cbe64fSgw 
1803b77b9231SDan McDonald 	case DKIOCFREE:
1804b77b9231SDan McDonald 	{
1805047c81d3SSaso Kiselkov 		dkioc_free_list_t *dfl;
1806b77b9231SDan McDonald 		dmu_tx_t *tx;
1807b77b9231SDan McDonald 
1808893c83baSGeorge Wilson 		if (!zvol_unmap_enabled)
1809893c83baSGeorge Wilson 			break;
1810893c83baSGeorge Wilson 
1811047c81d3SSaso Kiselkov 		if (!(flag & FKIOCTL)) {
1812047c81d3SSaso Kiselkov 			error = dfl_copyin((void *)arg, &dfl, flag, KM_SLEEP);
1813047c81d3SSaso Kiselkov 			if (error != 0)
1814047c81d3SSaso Kiselkov 				break;
1815047c81d3SSaso Kiselkov 		} else {
1816047c81d3SSaso Kiselkov 			dfl = (dkioc_free_list_t *)arg;
1817047c81d3SSaso Kiselkov 			ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS);
1818047c81d3SSaso Kiselkov 			if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) {
1819047c81d3SSaso Kiselkov 				error = SET_ERROR(EINVAL);
1820047c81d3SSaso Kiselkov 				break;
1821047c81d3SSaso Kiselkov 			}
1822b77b9231SDan McDonald 		}
1823b77b9231SDan McDonald 
1824574e2414SGeorge Wilson 		mutex_exit(&zfsdev_state_lock);
1825b77b9231SDan McDonald 
1826c3377ee9SJohn Levon 		smt_begin_unsafe();
1827455e370cSJohn Levon 
1828047c81d3SSaso Kiselkov 		for (int i = 0; i < dfl->dfl_num_exts; i++) {
1829047c81d3SSaso Kiselkov 			uint64_t start = dfl->dfl_exts[i].dfle_start,
1830047c81d3SSaso Kiselkov 			    length = dfl->dfl_exts[i].dfle_length,
1831047c81d3SSaso Kiselkov 			    end = start + length;
1832047c81d3SSaso Kiselkov 
1833047c81d3SSaso Kiselkov 			/*
1834047c81d3SSaso Kiselkov 			 * Apply Postel's Law to length-checking.  If they
1835047c81d3SSaso Kiselkov 			 * overshoot, just blank out until the end, if there's
1836047c81d3SSaso Kiselkov 			 * a need to blank out anything.
1837047c81d3SSaso Kiselkov 			 */
1838047c81d3SSaso Kiselkov 			if (start >= zv->zv_volsize)
1839047c81d3SSaso Kiselkov 				continue;	/* No need to do anything... */
1840047c81d3SSaso Kiselkov 			if (end > zv->zv_volsize) {
1841047c81d3SSaso Kiselkov 				end = DMU_OBJECT_END;
1842047c81d3SSaso Kiselkov 				length = end - start;
1843047c81d3SSaso Kiselkov 			}
1844b77b9231SDan McDonald 
184579315247SMatthew Ahrens 			lr = rangelock_enter(&zv->zv_rangelock, start, length,
1846047c81d3SSaso Kiselkov 			    RL_WRITER);
1847047c81d3SSaso Kiselkov 			tx = dmu_tx_create(zv->zv_objset);
1848047c81d3SSaso Kiselkov 			error = dmu_tx_assign(tx, TXG_WAIT);
1849047c81d3SSaso Kiselkov 			if (error != 0) {
1850047c81d3SSaso Kiselkov 				dmu_tx_abort(tx);
1851047c81d3SSaso Kiselkov 			} else {
1852047c81d3SSaso Kiselkov 				zvol_log_truncate(zv, tx, start, length,
1853047c81d3SSaso Kiselkov 				    B_TRUE);
1854047c81d3SSaso Kiselkov 				dmu_tx_commit(tx);
1855047c81d3SSaso Kiselkov 				error = dmu_free_long_range(zv->zv_objset,
1856047c81d3SSaso Kiselkov 				    ZVOL_OBJ, start, length);
1857047c81d3SSaso Kiselkov 			}
1858047c81d3SSaso Kiselkov 
185979315247SMatthew Ahrens 			rangelock_exit(lr);
1860047c81d3SSaso Kiselkov 
1861047c81d3SSaso Kiselkov 			if (error != 0)
1862047c81d3SSaso Kiselkov 				break;
1863047c81d3SSaso Kiselkov 		}
1864b77b9231SDan McDonald 
18651c9272b8SStephen Blinick 		/*
18661c9272b8SStephen Blinick 		 * If the write-cache is disabled, 'sync' property
18671c9272b8SStephen Blinick 		 * is set to 'always', or if the caller is asking for
18681c9272b8SStephen Blinick 		 * a synchronous free, commit this operation to the zil.
18691c9272b8SStephen Blinick 		 * This will sync any previous uncommitted writes to the
18701c9272b8SStephen Blinick 		 * zvol object.
18711c9272b8SStephen Blinick 		 * Can be overridden by the zvol_unmap_sync_enabled tunable.
18721c9272b8SStephen Blinick 		 */
18731c9272b8SStephen Blinick 		if ((error == 0) && zvol_unmap_sync_enabled &&
18741c9272b8SStephen Blinick 		    (!(zv->zv_flags & ZVOL_WCE) ||
18751c9272b8SStephen Blinick 		    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
1876047c81d3SSaso Kiselkov 		    (dfl->dfl_flags & DF_WAIT_SYNC))) {
18771c9272b8SStephen Blinick 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1878b77b9231SDan McDonald 		}
18791c9272b8SStephen Blinick 
1880047c81d3SSaso Kiselkov 		if (!(flag & FKIOCTL))
1881047c81d3SSaso Kiselkov 			dfl_free(dfl);
1882047c81d3SSaso Kiselkov 
1883c3377ee9SJohn Levon 		smt_end_unsafe();
1884455e370cSJohn Levon 
1885574e2414SGeorge Wilson 		return (error);
1886b77b9231SDan McDonald 	}
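	/*
	 * Illustrative note (not from this file): callers issue DKIOCFREE
	 * with a dkioc_free_list_t describing dfl_num_exts extents of
	 * (dfle_start, dfle_length), and may set DF_WAIT_SYNC in
	 * dfl_flags to have the frees committed to the ZIL before the
	 * ioctl returns.
	 */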
1887b77b9231SDan McDonald 
1888efe44a03SJerry Jelinek 	case DKIOC_CANFREE:
1889efe44a03SJerry Jelinek 		i = zvol_unmap_enabled ? 1 : 0;
1890efe44a03SJerry Jelinek 		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
1891efe44a03SJerry Jelinek 			error = EFAULT;
1892efe44a03SJerry Jelinek 		} else {
1893efe44a03SJerry Jelinek 			error = 0;
1894efe44a03SJerry Jelinek 		}
1895efe44a03SJerry Jelinek 		break;
1896efe44a03SJerry Jelinek 
1897fa9e4066Sahrens 	default:
1898be6fd75aSMatthew Ahrens 		error = SET_ERROR(ENOTTY);
1899fa9e4066Sahrens 		break;
1900fa9e4066Sahrens 
1901fa9e4066Sahrens 	}
1902c99e4bdcSChris Kirby 	mutex_exit(&zfsdev_state_lock);
1903fa9e4066Sahrens 	return (error);
1904fa9e4066Sahrens }
1905fa9e4066Sahrens 
1906fa9e4066Sahrens int
1907fa9e4066Sahrens zvol_busy(void)
1908fa9e4066Sahrens {
1909fa9e4066Sahrens 	return (zvol_minors != 0);
1910fa9e4066Sahrens }
1911fa9e4066Sahrens 
1912fa9e4066Sahrens void
1913fa9e4066Sahrens zvol_init(void)
1914fa9e4066Sahrens {
1915c99e4bdcSChris Kirby 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1916c99e4bdcSChris Kirby 	    1) == 0);
1917c99e4bdcSChris Kirby 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1918fa9e4066Sahrens }
1919fa9e4066Sahrens 
1920fa9e4066Sahrens void
1921fa9e4066Sahrens zvol_fini(void)
1922fa9e4066Sahrens {
1923c99e4bdcSChris Kirby 	mutex_destroy(&zfsdev_state_lock);
1924c99e4bdcSChris Kirby 	ddi_soft_state_fini(&zfsdev_state);
1925fa9e4066Sahrens }
1926e7cbe64fSgw 
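/*
 * checkfunc/syncfunc pair run through dsl_sync_task() by zvol_dump_init()
 * to activate the MULTI_VDEV_CRASH_DUMP feature: the check returns
 * nonzero (aborting the sync task) if the feature is already active,
 * otherwise the sync function bumps the feature's reference count.
 */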
1927810e43b2SBill Pijewski /*ARGSUSED*/
1928810e43b2SBill Pijewski static int
1929810e43b2SBill Pijewski zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1930810e43b2SBill Pijewski {
1931810e43b2SBill Pijewski 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1932810e43b2SBill Pijewski 
19332acef22dSMatthew Ahrens 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1934810e43b2SBill Pijewski 		return (1);
1935810e43b2SBill Pijewski 	return (0);
1936810e43b2SBill Pijewski }
1937810e43b2SBill Pijewski 
1938810e43b2SBill Pijewski /*ARGSUSED*/
1939810e43b2SBill Pijewski static void
1940810e43b2SBill Pijewski zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1941810e43b2SBill Pijewski {
1942810e43b2SBill Pijewski 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1943810e43b2SBill Pijewski 
19442acef22dSMatthew Ahrens 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1945810e43b2SBill Pijewski }
1946810e43b2SBill Pijewski 
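/*
 * Prepare a zvol for use as a dump device: free its existing blocks,
 * require (and activate) the MULTI_VDEV_CRASH_DUMP feature when the
 * pool layout calls for it, save the original checksum, compression,
 * refreservation, volblocksize and dedup settings in the ZAP so they can
 * be restored later, switch the volume to dump-safe settings, and
 * preallocate the entire volume.
 */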
1947e7cbe64fSgw static int
1948e7cbe64fSgw zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1949e7cbe64fSgw {
1950e7cbe64fSgw 	dmu_tx_t *tx;
1951810e43b2SBill Pijewski 	int error;
1952e7cbe64fSgw 	objset_t *os = zv->zv_objset;
1953810e43b2SBill Pijewski 	spa_t *spa = dmu_objset_spa(os);
1954810e43b2SBill Pijewski 	vdev_t *vd = spa->spa_root_vdev;
1955e7cbe64fSgw 	nvlist_t *nv = NULL;
1956810e43b2SBill Pijewski 	uint64_t version = spa_version(spa);
1957b10bba72SGeorge Wilson 	uint64_t checksum, compress, refresrv, vbs, dedup;
1958e7cbe64fSgw 
1959c99e4bdcSChris Kirby 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1960810e43b2SBill Pijewski 	ASSERT(vd->vdev_ops == &vdev_root_ops);
1961810e43b2SBill Pijewski 
1962681d9761SEric Taylor 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1963681d9761SEric Taylor 	    DMU_OBJECT_END);
1964b10bba72SGeorge Wilson 	if (error != 0)
1965b10bba72SGeorge Wilson 		return (error);
1966681d9761SEric Taylor 	/* wait for dmu_free_long_range to actually free the blocks */
1967681d9761SEric Taylor 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1968e7cbe64fSgw 
1969810e43b2SBill Pijewski 	/*
1970810e43b2SBill Pijewski 	 * If the pool on which the dump device is being initialized has more
1971810e43b2SBill Pijewski 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1972810e43b2SBill Pijewski 	 * enabled.  If so, bump that feature's counter to indicate that the
1973810e43b2SBill Pijewski 	 * feature is active. We also check the vdev type to handle the
1974810e43b2SBill Pijewski 	 * following case:
1975810e43b2SBill Pijewski 	 *   # zpool create test raidz disk1 disk2 disk3
1976810e43b2SBill Pijewski 	 *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
1977810e43b2SBill Pijewski 	 *   the raidz vdev itself has 3 children.
1978810e43b2SBill Pijewski 	 */
1979810e43b2SBill Pijewski 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1980810e43b2SBill Pijewski 		if (!spa_feature_is_enabled(spa,
19812acef22dSMatthew Ahrens 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1982810e43b2SBill Pijewski 			return (SET_ERROR(ENOTSUP));
1983810e43b2SBill Pijewski 		(void) dsl_sync_task(spa_name(spa),
1984810e43b2SBill Pijewski 		    zfs_mvdev_dump_feature_check,
19857d46dc6cSMatthew Ahrens 		    zfs_mvdev_dump_activate_feature_sync, NULL,
19867d46dc6cSMatthew Ahrens 		    2, ZFS_SPACE_CHECK_RESERVED);
1987810e43b2SBill Pijewski 	}
1988810e43b2SBill Pijewski 
1989b10bba72SGeorge Wilson 	if (!resize) {
1990b10bba72SGeorge Wilson 		error = dsl_prop_get_integer(zv->zv_name,
1991b10bba72SGeorge Wilson 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1992b10bba72SGeorge Wilson 		if (error == 0) {
1993b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1994b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1995b10bba72SGeorge Wilson 			    NULL);
1996b10bba72SGeorge Wilson 		}
1997b10bba72SGeorge Wilson 		if (error == 0) {
1998b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
1999b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
2000b10bba72SGeorge Wilson 			    &refresrv, NULL);
2001b10bba72SGeorge Wilson 		}
2002b10bba72SGeorge Wilson 		if (error == 0) {
2003b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
2004b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
2005b10bba72SGeorge Wilson 			    NULL);
2006b10bba72SGeorge Wilson 		}
2007b10bba72SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP && error == 0) {
2008b10bba72SGeorge Wilson 			error = dsl_prop_get_integer(zv->zv_name,
2009b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
2010b10bba72SGeorge Wilson 		}
2011b10bba72SGeorge Wilson 	}
2012b10bba72SGeorge Wilson 	if (error != 0)
2013b10bba72SGeorge Wilson 		return (error);
2014b10bba72SGeorge Wilson 
2015e7cbe64fSgw 	tx = dmu_tx_create(os);
2016e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2017681d9761SEric Taylor 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2018e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2019b10bba72SGeorge Wilson 	if (error != 0) {
2020e7cbe64fSgw 		dmu_tx_abort(tx);
2021e7cbe64fSgw 		return (error);
2022e7cbe64fSgw 	}
2023e7cbe64fSgw 
2024e7cbe64fSgw 	/*
2025e7cbe64fSgw 	 * If we are resizing the dump device then we only need to
2026e7cbe64fSgw 	 * update the refreservation to match the newly updated
2027e7cbe64fSgw 	 * zvolsize. Otherwise, we save off the original state of the
2028e7cbe64fSgw 	 * zvol so that we can restore them if the zvol is ever undumpified.
2029e7cbe64fSgw 	 */
2030e7cbe64fSgw 	if (resize) {
2031e7cbe64fSgw 		error = zap_update(os, ZVOL_ZAP_OBJ,
2032e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2033e7cbe64fSgw 		    &zv->zv_volsize, tx);
2034e7cbe64fSgw 	} else {
2035b10bba72SGeorge Wilson 		error = zap_update(os, ZVOL_ZAP_OBJ,
2036e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
2037e7cbe64fSgw 		    &compress, tx);
2038b10bba72SGeorge Wilson 		if (error == 0) {
2039b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2040b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2041b10bba72SGeorge Wilson 			    &checksum, tx);
2042b10bba72SGeorge Wilson 		}
2043b10bba72SGeorge Wilson 		if (error == 0) {
2044b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2045b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2046b10bba72SGeorge Wilson 			    &refresrv, tx);
2047b10bba72SGeorge Wilson 		}
2048b10bba72SGeorge Wilson 		if (error == 0) {
2049b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
2050b10bba72SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2051b10bba72SGeorge Wilson 			    &vbs, tx);
2052b10bba72SGeorge Wilson 		}
2053b10bba72SGeorge Wilson 		if (error == 0) {
2054b10bba72SGeorge Wilson 			error = dmu_object_set_blocksize(
2055b10bba72SGeorge Wilson 			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2056b10bba72SGeorge Wilson 		}
2057b10bba72SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP && error == 0) {
2058b10bba72SGeorge Wilson 			error = zap_update(os, ZVOL_ZAP_OBJ,
20598d265e66SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
20608d265e66SGeorge Wilson 			    &dedup, tx);
20618d265e66SGeorge Wilson 		}
2062681d9761SEric Taylor 		if (error == 0)
2063b5152584SMatthew Ahrens 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2064e7cbe64fSgw 	}
2065e7cbe64fSgw 	dmu_tx_commit(tx);
2066e7cbe64fSgw 
2067e7cbe64fSgw 	/*
2068e7cbe64fSgw 	 * We only need update the zvol's property if we are initializing
2069e7cbe64fSgw 	 * the dump area for the first time.
2070e7cbe64fSgw 	 */
2071b10bba72SGeorge Wilson 	if (error == 0 && !resize) {
2072b10bba72SGeorge Wilson 		/*
2073b10bba72SGeorge Wilson 		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2074b10bba72SGeorge Wilson 		 * function.  Otherwise, use the old default -- OFF.
2075b10bba72SGeorge Wilson 		 */
2076b10bba72SGeorge Wilson 		checksum = spa_feature_is_active(spa,
2077b10bba72SGeorge Wilson 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2078b10bba72SGeorge Wilson 		    ZIO_CHECKSUM_OFF;
2079b10bba72SGeorge Wilson 
2080e7cbe64fSgw 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2081e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2082e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2083e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2084e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2085e7cbe64fSgw 		    ZIO_COMPRESS_OFF) == 0);
2086e7cbe64fSgw 		VERIFY(nvlist_add_uint64(nv,
2087e7cbe64fSgw 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2088810e43b2SBill Pijewski 		    checksum) == 0);
20898d265e66SGeorge Wilson 		if (version >= SPA_VERSION_DEDUP) {
20908d265e66SGeorge Wilson 			VERIFY(nvlist_add_uint64(nv,
20918d265e66SGeorge Wilson 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
20928d265e66SGeorge Wilson 			    ZIO_CHECKSUM_OFF) == 0);
20938d265e66SGeorge Wilson 		}
2094e7cbe64fSgw 
209592241e0bSTom Erickson 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
209692241e0bSTom Erickson 		    nv, NULL);
2097e7cbe64fSgw 		nvlist_free(nv);
2098e7cbe64fSgw 	}
2099e7cbe64fSgw 
2100e7cbe64fSgw 	/* Allocate the space for the dump */
2101b10bba72SGeorge Wilson 	if (error == 0)
2102b10bba72SGeorge Wilson 		error = zvol_prealloc(zv);
2103e7cbe64fSgw 	return (error);
2104e7cbe64fSgw }
2105e7cbe64fSgw 
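/*
 * Dumpify the zvol: (re)run zvol_dump_init() if the recorded dump size
 * does not match the current volsize, build the LBA-to-DVA extent map
 * used by zvol_dumpio(), set ZVOL_DUMPIFIED and record the dump size in
 * the ZAP.
 */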
2106e7cbe64fSgw static int
2107e7cbe64fSgw zvol_dumpify(zvol_state_t *zv)
2108e7cbe64fSgw {
2109e7cbe64fSgw 	int error = 0;
2110e7cbe64fSgw 	uint64_t dumpsize = 0;
2111e7cbe64fSgw 	dmu_tx_t *tx;
2112e7cbe64fSgw 	objset_t *os = zv->zv_objset;
2113e7cbe64fSgw 
2114681d9761SEric Taylor 	if (zv->zv_flags & ZVOL_RDONLY)
2115be6fd75aSMatthew Ahrens 		return (SET_ERROR(EROFS));
2116e7cbe64fSgw 
2117eb633035STom Caputi 	if (os->os_encrypted)
2118eb633035STom Caputi 		return (SET_ERROR(ENOTSUP));
2119eb633035STom Caputi 
2120e7cbe64fSgw 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2121e7cbe64fSgw 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
21224445fffbSMatthew Ahrens 		boolean_t resize = (dumpsize > 0);
2123e7cbe64fSgw 
2124e7cbe64fSgw 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2125e7cbe64fSgw 			(void) zvol_dump_fini(zv);
2126e7cbe64fSgw 			return (error);
2127e7cbe64fSgw 		}
2128e7cbe64fSgw 	}
2129e7cbe64fSgw 
2130e7cbe64fSgw 	/*
2131e7cbe64fSgw 	 * Build up our lba mapping.
2132e7cbe64fSgw 	 */
2133e7cbe64fSgw 	error = zvol_get_lbas(zv);
2134e7cbe64fSgw 	if (error) {
2135e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2136e7cbe64fSgw 		return (error);
2137e7cbe64fSgw 	}
2138e7cbe64fSgw 
2139e7cbe64fSgw 	tx = dmu_tx_create(os);
2140e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2141e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2142e7cbe64fSgw 	if (error) {
2143e7cbe64fSgw 		dmu_tx_abort(tx);
2144e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2145e7cbe64fSgw 		return (error);
2146e7cbe64fSgw 	}
2147e7cbe64fSgw 
2148e7cbe64fSgw 	zv->zv_flags |= ZVOL_DUMPIFIED;
2149e7cbe64fSgw 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2150e7cbe64fSgw 	    &zv->zv_volsize, tx);
2151e7cbe64fSgw 	dmu_tx_commit(tx);
2152e7cbe64fSgw 
2153e7cbe64fSgw 	if (error) {
2154e7cbe64fSgw 		(void) zvol_dump_fini(zv);
2155e7cbe64fSgw 		return (error);
2156e7cbe64fSgw 	}
2157e7cbe64fSgw 
2158e7cbe64fSgw 	txg_wait_synced(dmu_objset_pool(os), 0);
2159e7cbe64fSgw 	return (0);
2160e7cbe64fSgw }
2161e7cbe64fSgw 
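/*
 * Undo dumpification: drop the recorded dump size, restore (best effort)
 * the properties saved by zvol_dump_init(), free the extent map and the
 * preallocated blocks, and put the original volblocksize back.
 */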
2162e7cbe64fSgw static int
2163e7cbe64fSgw zvol_dump_fini(zvol_state_t *zv)
2164e7cbe64fSgw {
2165e7cbe64fSgw 	dmu_tx_t *tx;
2166e7cbe64fSgw 	objset_t *os = zv->zv_objset;
2167e7cbe64fSgw 	nvlist_t *nv;
2168e7cbe64fSgw 	int error = 0;
2169afee20e4SGeorge Wilson 	uint64_t checksum, compress, refresrv, vbs, dedup;
21708d265e66SGeorge Wilson 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2171e7cbe64fSgw 
2172b7e50089Smaybee 	/*
2173b7e50089Smaybee 	 * Attempt to restore the zvol back to its pre-dumpified state.
2174b7e50089Smaybee 	 * This is a best-effort attempt as it's possible that not all
2175b7e50089Smaybee 	 * of these properties were initialized during the dumpify process
2176b7e50089Smaybee 	 * (i.e. error during zvol_dump_init).
2177b7e50089Smaybee 	 */
2178b7e50089Smaybee 
2179e7cbe64fSgw 	tx = dmu_tx_create(os);
2180e7cbe64fSgw 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2181e7cbe64fSgw 	error = dmu_tx_assign(tx, TXG_WAIT);
2182e7cbe64fSgw 	if (error) {
2183e7cbe64fSgw 		dmu_tx_abort(tx);
2184e7cbe64fSgw 		return (error);
2185e7cbe64fSgw 	}
2186b7e50089Smaybee 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2187b7e50089Smaybee 	dmu_tx_commit(tx);
2188e7cbe64fSgw 
2189e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2190e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2191e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2192e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2193e7cbe64fSgw 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2194e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
219588b7b0f2SMatthew Ahrens 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
219688b7b0f2SMatthew Ahrens 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2197e7cbe64fSgw 
2198e7cbe64fSgw 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2199e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2200e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2201e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2202e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2203e7cbe64fSgw 	(void) nvlist_add_uint64(nv,
2204e7cbe64fSgw 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
22058d265e66SGeorge Wilson 	if (version >= SPA_VERSION_DEDUP &&
22068d265e66SGeorge Wilson 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
22078d265e66SGeorge Wilson 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
22088d265e66SGeorge Wilson 		(void) nvlist_add_uint64(nv,
22098d265e66SGeorge Wilson 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
22108d265e66SGeorge Wilson 	}
221192241e0bSTom Erickson 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
221292241e0bSTom Erickson 	    nv, NULL);
2213e7cbe64fSgw 	nvlist_free(nv);
2214e7cbe64fSgw 
2215b7e50089Smaybee 	zvol_free_extents(zv);
2216b7e50089Smaybee 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2217b7e50089Smaybee 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2218681d9761SEric Taylor 	/* wait for dmu_free_long_range to actually free the blocks */
2219681d9761SEric Taylor 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2220681d9761SEric Taylor 	tx = dmu_tx_create(os);
2221681d9761SEric Taylor 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2222681d9761SEric Taylor 	error = dmu_tx_assign(tx, TXG_WAIT);
2223681d9761SEric Taylor 	if (error) {
2224681d9761SEric Taylor 		dmu_tx_abort(tx);
2225681d9761SEric Taylor 		return (error);
2226681d9761SEric Taylor 	}
2227b24ab676SJeff Bonwick 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2228b24ab676SJeff Bonwick 		zv->zv_volblocksize = vbs;
2229681d9761SEric Taylor 	dmu_tx_commit(tx);
2230b7e50089Smaybee 
2231e7cbe64fSgw 	return (0);
2232e7cbe64fSgw }
2233