xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu.c (revision 503ad85c)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
2214843421SMatthew Ahrens  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23fa9e4066Sahrens  * Use is subject to license terms.
24fa9e4066Sahrens  */
25fa9e4066Sahrens 
26fa9e4066Sahrens #include <sys/dmu.h>
27fa9e4066Sahrens #include <sys/dmu_impl.h>
28fa9e4066Sahrens #include <sys/dmu_tx.h>
29fa9e4066Sahrens #include <sys/dbuf.h>
30fa9e4066Sahrens #include <sys/dnode.h>
31fa9e4066Sahrens #include <sys/zfs_context.h>
32fa9e4066Sahrens #include <sys/dmu_objset.h>
33fa9e4066Sahrens #include <sys/dmu_traverse.h>
34fa9e4066Sahrens #include <sys/dsl_dataset.h>
35fa9e4066Sahrens #include <sys/dsl_dir.h>
36fa9e4066Sahrens #include <sys/dsl_pool.h>
371d452cf5Sahrens #include <sys/dsl_synctask.h>
38a2eea2e1Sahrens #include <sys/dsl_prop.h>
39fa9e4066Sahrens #include <sys/dmu_zfetch.h>
40fa9e4066Sahrens #include <sys/zfs_ioctl.h>
41fa9e4066Sahrens #include <sys/zap.h>
42ea8dc4b6Seschrock #include <sys/zio_checksum.h>
4344eda4d7Smaybee #ifdef _KERNEL
4444eda4d7Smaybee #include <sys/vmsystm.h>
450fab61baSJonathan W Adams #include <sys/zfs_znode.h>
4644eda4d7Smaybee #endif
47fa9e4066Sahrens 
48fa9e4066Sahrens const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
49fa9e4066Sahrens 	{	byteswap_uint8_array,	TRUE,	"unallocated"		},
50fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"object directory"	},
51fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"object array"		},
52fa9e4066Sahrens 	{	byteswap_uint8_array,	TRUE,	"packed nvlist"		},
53fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"packed nvlist size"	},
54fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"bplist"		},
55fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"bplist header"		},
56fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"SPA space map header"	},
57fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"SPA space map"		},
58fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"ZIL intent log"	},
59fa9e4066Sahrens 	{	dnode_buf_byteswap,	TRUE,	"DMU dnode"		},
60fa9e4066Sahrens 	{	dmu_objset_byteswap,	TRUE,	"DMU objset"		},
61fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"DSL directory"		},
62fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"DSL directory child map"},
63fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"DSL dataset snap map"	},
64fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"DSL props"		},
65fa9e4066Sahrens 	{	byteswap_uint64_array,	TRUE,	"DSL dataset"		},
66fa9e4066Sahrens 	{	zfs_znode_byteswap,	TRUE,	"ZFS znode"		},
67da6c28aaSamw 	{	zfs_oldacl_byteswap,	TRUE,	"ZFS V0 ACL"		},
68fa9e4066Sahrens 	{	byteswap_uint8_array,	FALSE,	"ZFS plain file"	},
69fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"ZFS directory"		},
70fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"ZFS master node"	},
71fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"ZFS delete queue"	},
72fa9e4066Sahrens 	{	byteswap_uint8_array,	FALSE,	"zvol object"		},
73fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"zvol prop"		},
74fa9e4066Sahrens 	{	byteswap_uint8_array,	FALSE,	"other uint8[]"		},
75fa9e4066Sahrens 	{	byteswap_uint64_array,	FALSE,	"other uint64[]"	},
76fa9e4066Sahrens 	{	zap_byteswap,		TRUE,	"other ZAP"		},
77ea8dc4b6Seschrock 	{	zap_byteswap,		TRUE,	"persistent error log"	},
7806eeb2adSek 	{	byteswap_uint8_array,	TRUE,	"SPA history"		},
7906eeb2adSek 	{	byteswap_uint64_array,	TRUE,	"SPA history offsets"	},
80ecd6cf80Smarks 	{	zap_byteswap,		TRUE,	"Pool properties"	},
81da6c28aaSamw 	{	zap_byteswap,		TRUE,	"DSL permissions"	},
82da6c28aaSamw 	{	zfs_acl_byteswap,	TRUE,	"ZFS ACL"		},
83da6c28aaSamw 	{	byteswap_uint8_array,	TRUE,	"ZFS SYSACL"		},
84da6c28aaSamw 	{	byteswap_uint8_array,	TRUE,	"FUID table"		},
85add89791Smarks 	{	byteswap_uint64_array,	TRUE,	"FUID table size"	},
86088f3894Sahrens 	{	zap_byteswap,		TRUE,	"DSL dataset next clones"},
87088f3894Sahrens 	{	zap_byteswap,		TRUE,	"scrub work queue"	},
8814843421SMatthew Ahrens 	{	zap_byteswap,		TRUE,	"ZFS user/group used"	},
8914843421SMatthew Ahrens 	{	zap_byteswap,		TRUE,	"ZFS user/group quota"	},
90842727c2SChris Kirby 	{	zap_byteswap,		TRUE,	"snapshot refcount tags"},
91fa9e4066Sahrens };
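
/*
 * The table above is indexed by DMU_OT_* object type, giving each type its
 * byteswap routine, a metadata flag, and a human-readable name.  A minimal
 * sketch of how it can be consulted (assuming the dmu_object_type_info_t
 * field names ot_byteswap, ot_metadata and ot_name from dmu.h):
 *
 *	dmu_ot[DMU_OT_PLAIN_FILE_CONTENTS].ot_name	 is "ZFS plain file"
 *	dmu_ot[DMU_OT_PLAIN_FILE_CONTENTS].ot_metadata	 is FALSE
 */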
92fa9e4066Sahrens 
93fa9e4066Sahrens int
94ea8dc4b6Seschrock dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
95ea8dc4b6Seschrock     void *tag, dmu_buf_t **dbp)
96fa9e4066Sahrens {
97fa9e4066Sahrens 	dnode_t *dn;
98fa9e4066Sahrens 	uint64_t blkid;
99fa9e4066Sahrens 	dmu_buf_impl_t *db;
100ea8dc4b6Seschrock 	int err;
101fa9e4066Sahrens 
102*503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
103ea8dc4b6Seschrock 	if (err)
104ea8dc4b6Seschrock 		return (err);
105fa9e4066Sahrens 	blkid = dbuf_whichblock(dn, offset);
106fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
107ea8dc4b6Seschrock 	db = dbuf_hold(dn, blkid, tag);
108fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
109ea8dc4b6Seschrock 	if (db == NULL) {
110ea8dc4b6Seschrock 		err = EIO;
111ea8dc4b6Seschrock 	} else {
112ea8dc4b6Seschrock 		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
113ea8dc4b6Seschrock 		if (err) {
114ea8dc4b6Seschrock 			dbuf_rele(db, tag);
115ea8dc4b6Seschrock 			db = NULL;
116ea8dc4b6Seschrock 		}
117ea8dc4b6Seschrock 	}
118fa9e4066Sahrens 
119ea8dc4b6Seschrock 	dnode_rele(dn, FTAG);
120ea8dc4b6Seschrock 	*dbp = &db->db;
121ea8dc4b6Seschrock 	return (err);
122fa9e4066Sahrens }
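
/*
 * Illustrative hold/release usage of dmu_buf_hold() above -- a sketch, not
 * code from this file.  The caller supplies a tag (FTAG is the conventional
 * self-referential tag) and must drop the hold with dmu_buf_rele():
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db);
 *	if (err == 0) {
 *		... examine db->db_data / db->db_size ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */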
123fa9e4066Sahrens 
124fa9e4066Sahrens int
125fa9e4066Sahrens dmu_bonus_max(void)
126fa9e4066Sahrens {
127fa9e4066Sahrens 	return (DN_MAX_BONUSLEN);
128fa9e4066Sahrens }
129fa9e4066Sahrens 
1301934e92fSmaybee int
1311934e92fSmaybee dmu_set_bonus(dmu_buf_t *db, int newsize, dmu_tx_t *tx)
1321934e92fSmaybee {
1331934e92fSmaybee 	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
1341934e92fSmaybee 
1351934e92fSmaybee 	if (dn->dn_bonus != (dmu_buf_impl_t *)db)
1361934e92fSmaybee 		return (EINVAL);
1371934e92fSmaybee 	if (newsize < 0 || newsize > db->db_size)
1381934e92fSmaybee 		return (EINVAL);
1391934e92fSmaybee 	dnode_setbonuslen(dn, newsize, tx);
1401934e92fSmaybee 	return (0);
1411934e92fSmaybee }
1421934e92fSmaybee 
143fa9e4066Sahrens /*
144ea8dc4b6Seschrock  * returns ENOENT, EIO, or 0.
145fa9e4066Sahrens  */
146ea8dc4b6Seschrock int
147ea8dc4b6Seschrock dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
148fa9e4066Sahrens {
149ea8dc4b6Seschrock 	dnode_t *dn;
150fa9e4066Sahrens 	dmu_buf_impl_t *db;
1511934e92fSmaybee 	int error;
152fa9e4066Sahrens 
153*503ad85cSMatthew Ahrens 	error = dnode_hold(os, object, FTAG, &dn);
1541934e92fSmaybee 	if (error)
1551934e92fSmaybee 		return (error);
156fa9e4066Sahrens 
157ea8dc4b6Seschrock 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
158ea8dc4b6Seschrock 	if (dn->dn_bonus == NULL) {
159fa9e4066Sahrens 		rw_exit(&dn->dn_struct_rwlock);
160ea8dc4b6Seschrock 		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
161ea8dc4b6Seschrock 		if (dn->dn_bonus == NULL)
1621934e92fSmaybee 			dbuf_create_bonus(dn);
163fa9e4066Sahrens 	}
164ea8dc4b6Seschrock 	db = dn->dn_bonus;
165ea8dc4b6Seschrock 	rw_exit(&dn->dn_struct_rwlock);
1661934e92fSmaybee 
1671934e92fSmaybee 	/* as long as the bonus buf is held, the dnode will be held */
1681934e92fSmaybee 	if (refcount_add(&db->db_holds, tag) == 1)
1691934e92fSmaybee 		VERIFY(dnode_add_ref(dn, db));
1701934e92fSmaybee 
171fa9e4066Sahrens 	dnode_rele(dn, FTAG);
172ea8dc4b6Seschrock 
173ea8dc4b6Seschrock 	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));
174ea8dc4b6Seschrock 
175ea8dc4b6Seschrock 	*dbp = &db->db;
176ea8dc4b6Seschrock 	return (0);
177fa9e4066Sahrens }
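
/*
 * Sketch of typical bonus-buffer usage (illustrative; a znode, whose bonus
 * holds a znode_phys_t in this era, is one example of such an object):
 *
 *	dmu_buf_t *db;
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		znode_phys_t *zp = db->db_data;
 *		... read fields of *zp ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 *
 * Note that, per the refcount_add()/dnode_add_ref() pairing above, holding
 * the bonus buffer also keeps the dnode held.
 */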
178fa9e4066Sahrens 
17913506d1eSmaybee /*
18013506d1eSmaybee  * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
18113506d1eSmaybee  * to take a held dnode rather than <os, object> -- the lookup is wasteful,
18213506d1eSmaybee  * and can induce severe lock contention when writing to several files
18313506d1eSmaybee  * whose dnodes are in the same block.
18413506d1eSmaybee  */
18513506d1eSmaybee static int
1867bfdf011SNeil Perrin dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
1877bfdf011SNeil Perrin     int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
188fa9e4066Sahrens {
18905715f94SMark Maybee 	dsl_pool_t *dp = NULL;
190fa9e4066Sahrens 	dmu_buf_t **dbp;
191fa9e4066Sahrens 	uint64_t blkid, nblks, i;
1927bfdf011SNeil Perrin 	uint32_t dbuf_flags;
193ea8dc4b6Seschrock 	int err;
194ea8dc4b6Seschrock 	zio_t *zio;
19505715f94SMark Maybee 	hrtime_t start;
196ea8dc4b6Seschrock 
197ea8dc4b6Seschrock 	ASSERT(length <= DMU_MAX_ACCESS);
198fa9e4066Sahrens 
199c87b8fc5SMark J Musante 	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
2007bfdf011SNeil Perrin 	if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
2017bfdf011SNeil Perrin 		dbuf_flags |= DB_RF_NOPREFETCH;
202ea8dc4b6Seschrock 
203fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
204fa9e4066Sahrens 	if (dn->dn_datablkshift) {
205fa9e4066Sahrens 		int blkshift = dn->dn_datablkshift;
206fa9e4066Sahrens 		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
207b1b8ab34Slling 		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
208fa9e4066Sahrens 	} else {
2090125049cSahrens 		if (offset + length > dn->dn_datablksz) {
2100125049cSahrens 			zfs_panic_recover("zfs: accessing past end of object "
2110125049cSahrens 			    "%llx/%llx (size=%u access=%llu+%llu)",
2120125049cSahrens 			    (longlong_t)dn->dn_objset->
2130125049cSahrens 			    os_dsl_dataset->ds_object,
2140125049cSahrens 			    (longlong_t)dn->dn_object, dn->dn_datablksz,
2150125049cSahrens 			    (longlong_t)offset, (longlong_t)length);
216c87b8fc5SMark J Musante 			rw_exit(&dn->dn_struct_rwlock);
2170125049cSahrens 			return (EIO);
2180125049cSahrens 		}
219fa9e4066Sahrens 		nblks = 1;
220fa9e4066Sahrens 	}
221ea8dc4b6Seschrock 	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
222fa9e4066Sahrens 
22305715f94SMark Maybee 	if (dn->dn_objset->os_dsl_dataset)
22405715f94SMark Maybee 		dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool;
22505715f94SMark Maybee 	if (dp && dsl_pool_sync_context(dp))
22605715f94SMark Maybee 		start = gethrtime();
227e14bb325SJeff Bonwick 	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
228fa9e4066Sahrens 	blkid = dbuf_whichblock(dn, offset);
229fa9e4066Sahrens 	for (i = 0; i < nblks; i++) {
230ea8dc4b6Seschrock 		dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
231ea8dc4b6Seschrock 		if (db == NULL) {
232ea8dc4b6Seschrock 			rw_exit(&dn->dn_struct_rwlock);
233ea8dc4b6Seschrock 			dmu_buf_rele_array(dbp, nblks, tag);
234ea8dc4b6Seschrock 			zio_nowait(zio);
235ea8dc4b6Seschrock 			return (EIO);
236ea8dc4b6Seschrock 		}
237ea8dc4b6Seschrock 		/* initiate async i/o */
23813506d1eSmaybee 		if (read) {
2397bfdf011SNeil Perrin 			(void) dbuf_read(db, zio, dbuf_flags);
240ea8dc4b6Seschrock 		}
241ea8dc4b6Seschrock 		dbp[i] = &db->db;
242fa9e4066Sahrens 	}
243fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
244fa9e4066Sahrens 
245ea8dc4b6Seschrock 	/* wait for async i/o */
246ea8dc4b6Seschrock 	err = zio_wait(zio);
24705715f94SMark Maybee 	/* track read overhead when we are in sync context */
24805715f94SMark Maybee 	if (dp && dsl_pool_sync_context(dp))
24905715f94SMark Maybee 		dp->dp_read_overhead += gethrtime() - start;
250ea8dc4b6Seschrock 	if (err) {
251ea8dc4b6Seschrock 		dmu_buf_rele_array(dbp, nblks, tag);
252ea8dc4b6Seschrock 		return (err);
253ea8dc4b6Seschrock 	}
254fa9e4066Sahrens 
255ea8dc4b6Seschrock 	/* wait for other io to complete */
256ea8dc4b6Seschrock 	if (read) {
257ea8dc4b6Seschrock 		for (i = 0; i < nblks; i++) {
258ea8dc4b6Seschrock 			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
259ea8dc4b6Seschrock 			mutex_enter(&db->db_mtx);
260ea8dc4b6Seschrock 			while (db->db_state == DB_READ ||
261ea8dc4b6Seschrock 			    db->db_state == DB_FILL)
262ea8dc4b6Seschrock 				cv_wait(&db->db_changed, &db->db_mtx);
263ea8dc4b6Seschrock 			if (db->db_state == DB_UNCACHED)
264ea8dc4b6Seschrock 				err = EIO;
265ea8dc4b6Seschrock 			mutex_exit(&db->db_mtx);
266ea8dc4b6Seschrock 			if (err) {
267ea8dc4b6Seschrock 				dmu_buf_rele_array(dbp, nblks, tag);
268ea8dc4b6Seschrock 				return (err);
269ea8dc4b6Seschrock 			}
270ea8dc4b6Seschrock 		}
271ea8dc4b6Seschrock 	}
272fa9e4066Sahrens 
273ea8dc4b6Seschrock 	*numbufsp = nblks;
274ea8dc4b6Seschrock 	*dbpp = dbp;
275ea8dc4b6Seschrock 	return (0);
276fa9e4066Sahrens }
277fa9e4066Sahrens 
278a2eea2e1Sahrens static int
27913506d1eSmaybee dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
28013506d1eSmaybee     uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
28113506d1eSmaybee {
28213506d1eSmaybee 	dnode_t *dn;
28313506d1eSmaybee 	int err;
28413506d1eSmaybee 
285*503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
28613506d1eSmaybee 	if (err)
28713506d1eSmaybee 		return (err);
28813506d1eSmaybee 
28913506d1eSmaybee 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
2907bfdf011SNeil Perrin 	    numbufsp, dbpp, DMU_READ_PREFETCH);
29113506d1eSmaybee 
29213506d1eSmaybee 	dnode_rele(dn, FTAG);
29313506d1eSmaybee 
29413506d1eSmaybee 	return (err);
29513506d1eSmaybee }
29613506d1eSmaybee 
29713506d1eSmaybee int
29813506d1eSmaybee dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
29913506d1eSmaybee     uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
30013506d1eSmaybee {
30113506d1eSmaybee 	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
30213506d1eSmaybee 	int err;
30313506d1eSmaybee 
30413506d1eSmaybee 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
3057bfdf011SNeil Perrin 	    numbufsp, dbpp, DMU_READ_PREFETCH);
30613506d1eSmaybee 
30713506d1eSmaybee 	return (err);
30813506d1eSmaybee }
30913506d1eSmaybee 
310fa9e4066Sahrens void
311ea8dc4b6Seschrock dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
312fa9e4066Sahrens {
313fa9e4066Sahrens 	int i;
314fa9e4066Sahrens 	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
315fa9e4066Sahrens 
316fa9e4066Sahrens 	if (numbufs == 0)
317fa9e4066Sahrens 		return;
318fa9e4066Sahrens 
319ea8dc4b6Seschrock 	for (i = 0; i < numbufs; i++) {
320ea8dc4b6Seschrock 		if (dbp[i])
321ea8dc4b6Seschrock 			dbuf_rele(dbp[i], tag);
322ea8dc4b6Seschrock 	}
323fa9e4066Sahrens 
324fa9e4066Sahrens 	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
325fa9e4066Sahrens }
326fa9e4066Sahrens 
327fa9e4066Sahrens void
328fa9e4066Sahrens dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
329fa9e4066Sahrens {
330fa9e4066Sahrens 	dnode_t *dn;
331fa9e4066Sahrens 	uint64_t blkid;
332ea8dc4b6Seschrock 	int nblks, i, err;
333fa9e4066Sahrens 
334416e0cd8Sek 	if (zfs_prefetch_disable)
335416e0cd8Sek 		return;
336416e0cd8Sek 
337fa9e4066Sahrens 	if (len == 0) {  /* they're interested in the bonus buffer */
338*503ad85cSMatthew Ahrens 		dn = os->os_meta_dnode;
339fa9e4066Sahrens 
340fa9e4066Sahrens 		if (object == 0 || object >= DN_MAX_OBJECT)
341fa9e4066Sahrens 			return;
342fa9e4066Sahrens 
343fa9e4066Sahrens 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
344fa9e4066Sahrens 		blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
345fa9e4066Sahrens 		dbuf_prefetch(dn, blkid);
346fa9e4066Sahrens 		rw_exit(&dn->dn_struct_rwlock);
347fa9e4066Sahrens 		return;
348fa9e4066Sahrens 	}
349fa9e4066Sahrens 
350fa9e4066Sahrens 	/*
351fa9e4066Sahrens 	 * XXX - Note, if the dnode for the requested object is not
352fa9e4066Sahrens 	 * already cached, we will do a *synchronous* read in the
353fa9e4066Sahrens 	 * dnode_hold() call.  The same is true for any indirects.
354fa9e4066Sahrens 	 */
355*503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
356ea8dc4b6Seschrock 	if (err != 0)
357fa9e4066Sahrens 		return;
358fa9e4066Sahrens 
359fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
360fa9e4066Sahrens 	if (dn->dn_datablkshift) {
361fa9e4066Sahrens 		int blkshift = dn->dn_datablkshift;
362fa9e4066Sahrens 		nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
363b1b8ab34Slling 		    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
364fa9e4066Sahrens 	} else {
365fa9e4066Sahrens 		nblks = (offset < dn->dn_datablksz);
366fa9e4066Sahrens 	}
367fa9e4066Sahrens 
368fa9e4066Sahrens 	if (nblks != 0) {
369fa9e4066Sahrens 		blkid = dbuf_whichblock(dn, offset);
370fa9e4066Sahrens 		for (i = 0; i < nblks; i++)
371fa9e4066Sahrens 			dbuf_prefetch(dn, blkid+i);
372fa9e4066Sahrens 	}
373fa9e4066Sahrens 
374fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
375fa9e4066Sahrens 
376fa9e4066Sahrens 	dnode_rele(dn, FTAG);
377fa9e4066Sahrens }
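
/*
 * Example (sketch): a reader that knows which range it will need can start
 * the asynchronous prefetch early and issue the actual read later:
 *
 *	dmu_prefetch(os, object, offset, len);
 *	... other work ...
 *	err = dmu_read(os, object, offset, len, buf, DMU_READ_PREFETCH);
 *
 * Passing len == 0 prefetches the dnode/bonus area instead, as handled at
 * the top of dmu_prefetch().  The XXX above still applies: an uncached
 * dnode costs a synchronous read inside dnode_hold().
 */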
378fa9e4066Sahrens 
37976256205SMark Maybee /*
38076256205SMark Maybee  * Get the next "chunk" of file data to free.  We traverse the file from
38176256205SMark Maybee  * the end so that the file gets shorter over time (if we crash in the
38276256205SMark Maybee  * middle, this will leave us in a better state).  We find allocated file
38376256205SMark Maybee  * data by simply searching the allocated level 1 indirects.
38476256205SMark Maybee  */
385cdb0ab79Smaybee static int
38676256205SMark Maybee get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t limit)
387cdb0ab79Smaybee {
38876256205SMark Maybee 	uint64_t len = *start - limit;
38976256205SMark Maybee 	uint64_t blkcnt = 0;
39076256205SMark Maybee 	uint64_t maxblks = DMU_MAX_ACCESS / (1ULL << (dn->dn_indblkshift + 1));
39176256205SMark Maybee 	uint64_t iblkrange =
3921c8564a7SMark Maybee 	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
393cdb0ab79Smaybee 
39476256205SMark Maybee 	ASSERT(limit <= *start);
395cdb0ab79Smaybee 
39676256205SMark Maybee 	if (len <= iblkrange * maxblks) {
39776256205SMark Maybee 		*start = limit;
398cdb0ab79Smaybee 		return (0);
399cdb0ab79Smaybee 	}
40076256205SMark Maybee 	ASSERT(ISP2(iblkrange));
401cdb0ab79Smaybee 
40276256205SMark Maybee 	while (*start > limit && blkcnt < maxblks) {
4031c8564a7SMark Maybee 		int err;
404cdb0ab79Smaybee 
40576256205SMark Maybee 		/* find next allocated L1 indirect */
406cdb0ab79Smaybee 		err = dnode_next_offset(dn,
40776256205SMark Maybee 		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);
408cdb0ab79Smaybee 
40976256205SMark Maybee 		/* if there are no more, then we are done */
41076256205SMark Maybee 		if (err == ESRCH) {
41176256205SMark Maybee 			*start = limit;
412cdb0ab79Smaybee 			return (0);
41376256205SMark Maybee 		} else if (err) {
414cdb0ab79Smaybee 			return (err);
41576256205SMark Maybee 		}
41676256205SMark Maybee 		blkcnt += 1;
41776256205SMark Maybee 
41876256205SMark Maybee 		/* reset offset to end of "next" block back */
41976256205SMark Maybee 		*start = P2ALIGN(*start, iblkrange);
42076256205SMark Maybee 		if (*start <= limit)
42176256205SMark Maybee 			*start = limit;
42276256205SMark Maybee 		else
42376256205SMark Maybee 			*start -= 1;
424cdb0ab79Smaybee 	}
425cdb0ab79Smaybee 	return (0);
426cdb0ab79Smaybee }
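
/*
 * Worked example of the chunking arithmetic above (illustrative numbers):
 * with 128K data blocks and 16K indirect blocks (dn_indblkshift == 14), an
 * L1 indirect holds 1 << (14 - SPA_BLKPTRSHIFT) == 128 block pointers, so
 * iblkrange == 128 * 128K == 16M of file data per L1 indirect.  maxblks
 * then limits a chunk to DMU_MAX_ACCESS / (2 * indirect-block-size) such
 * L1 ranges, bounding how much metadata one free transaction dirties.
 */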
427cdb0ab79Smaybee 
428cdb0ab79Smaybee static int
429cdb0ab79Smaybee dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
430cdb0ab79Smaybee     uint64_t length, boolean_t free_dnode)
431cdb0ab79Smaybee {
432cdb0ab79Smaybee 	dmu_tx_t *tx;
433cdb0ab79Smaybee 	uint64_t object_size, start, end, len;
434cdb0ab79Smaybee 	boolean_t trunc = (length == DMU_OBJECT_END);
435cdb0ab79Smaybee 	int align, err;
436cdb0ab79Smaybee 
437cdb0ab79Smaybee 	align = 1 << dn->dn_datablkshift;
438cdb0ab79Smaybee 	ASSERT(align > 0);
439cdb0ab79Smaybee 	object_size = align == 1 ? dn->dn_datablksz :
440cdb0ab79Smaybee 	    (dn->dn_maxblkid + 1) << dn->dn_datablkshift;
441cdb0ab79Smaybee 
44214843421SMatthew Ahrens 	end = offset + length;
44314843421SMatthew Ahrens 	if (trunc || end > object_size)
444cdb0ab79Smaybee 		end = object_size;
445cdb0ab79Smaybee 	if (end <= offset)
446cdb0ab79Smaybee 		return (0);
447cdb0ab79Smaybee 	length = end - offset;
448cdb0ab79Smaybee 
449cdb0ab79Smaybee 	while (length) {
450cdb0ab79Smaybee 		start = end;
45114843421SMatthew Ahrens 		/* assert(offset <= start) */
452cdb0ab79Smaybee 		err = get_next_chunk(dn, &start, offset);
453cdb0ab79Smaybee 		if (err)
454cdb0ab79Smaybee 			return (err);
455cdb0ab79Smaybee 		len = trunc ? DMU_OBJECT_END : end - start;
456cdb0ab79Smaybee 
457cdb0ab79Smaybee 		tx = dmu_tx_create(os);
458cdb0ab79Smaybee 		dmu_tx_hold_free(tx, dn->dn_object, start, len);
459cdb0ab79Smaybee 		err = dmu_tx_assign(tx, TXG_WAIT);
460cdb0ab79Smaybee 		if (err) {
461cdb0ab79Smaybee 			dmu_tx_abort(tx);
462cdb0ab79Smaybee 			return (err);
463cdb0ab79Smaybee 		}
464cdb0ab79Smaybee 
465cdb0ab79Smaybee 		dnode_free_range(dn, start, trunc ? -1 : len, tx);
466cdb0ab79Smaybee 
4671c8564a7SMark Maybee 		if (start == 0 && free_dnode) {
4681c8564a7SMark Maybee 			ASSERT(trunc);
469cdb0ab79Smaybee 			dnode_free(dn, tx);
4701c8564a7SMark Maybee 		}
471cdb0ab79Smaybee 
472cdb0ab79Smaybee 		length -= end - start;
473cdb0ab79Smaybee 
474cdb0ab79Smaybee 		dmu_tx_commit(tx);
475cdb0ab79Smaybee 		end = start;
476cdb0ab79Smaybee 	}
477cdb0ab79Smaybee 	return (0);
478cdb0ab79Smaybee }
479cdb0ab79Smaybee 
480cdb0ab79Smaybee int
481cdb0ab79Smaybee dmu_free_long_range(objset_t *os, uint64_t object,
482cdb0ab79Smaybee     uint64_t offset, uint64_t length)
483cdb0ab79Smaybee {
484cdb0ab79Smaybee 	dnode_t *dn;
485cdb0ab79Smaybee 	int err;
486cdb0ab79Smaybee 
487*503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
488cdb0ab79Smaybee 	if (err != 0)
489cdb0ab79Smaybee 		return (err);
490cdb0ab79Smaybee 	err = dmu_free_long_range_impl(os, dn, offset, length, FALSE);
491cdb0ab79Smaybee 	dnode_rele(dn, FTAG);
492cdb0ab79Smaybee 	return (err);
493cdb0ab79Smaybee }
494cdb0ab79Smaybee 
495cdb0ab79Smaybee int
496cdb0ab79Smaybee dmu_free_object(objset_t *os, uint64_t object)
497cdb0ab79Smaybee {
498cdb0ab79Smaybee 	dnode_t *dn;
499cdb0ab79Smaybee 	dmu_tx_t *tx;
500cdb0ab79Smaybee 	int err;
501cdb0ab79Smaybee 
502*503ad85cSMatthew Ahrens 	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
503cdb0ab79Smaybee 	    FTAG, &dn);
504cdb0ab79Smaybee 	if (err != 0)
505cdb0ab79Smaybee 		return (err);
506cdb0ab79Smaybee 	if (dn->dn_nlevels == 1) {
507cdb0ab79Smaybee 		tx = dmu_tx_create(os);
508cdb0ab79Smaybee 		dmu_tx_hold_bonus(tx, object);
509cdb0ab79Smaybee 		dmu_tx_hold_free(tx, dn->dn_object, 0, DMU_OBJECT_END);
510cdb0ab79Smaybee 		err = dmu_tx_assign(tx, TXG_WAIT);
511cdb0ab79Smaybee 		if (err == 0) {
512cdb0ab79Smaybee 			dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
513cdb0ab79Smaybee 			dnode_free(dn, tx);
514cdb0ab79Smaybee 			dmu_tx_commit(tx);
515cdb0ab79Smaybee 		} else {
516cdb0ab79Smaybee 			dmu_tx_abort(tx);
517cdb0ab79Smaybee 		}
518cdb0ab79Smaybee 	} else {
519cdb0ab79Smaybee 		err = dmu_free_long_range_impl(os, dn, 0, DMU_OBJECT_END, TRUE);
520cdb0ab79Smaybee 	}
521cdb0ab79Smaybee 	dnode_rele(dn, FTAG);
522cdb0ab79Smaybee 	return (err);
523cdb0ab79Smaybee }
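
/*
 * Usage sketch for the two entry points above: dmu_free_long_range() frees
 * a byte range (e.g. a truncation frees from the new EOF to the end), while
 * dmu_free_object() frees all data and the dnode itself.  Neither takes a
 * caller transaction; each chunk is freed in its own tx, as above:
 *
 *	err = dmu_free_long_range(os, object, new_size, DMU_OBJECT_END);
 *	err = dmu_free_object(os, object);
 */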
524cdb0ab79Smaybee 
525ea8dc4b6Seschrock int
526fa9e4066Sahrens dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
527fa9e4066Sahrens     uint64_t size, dmu_tx_t *tx)
528fa9e4066Sahrens {
529ea8dc4b6Seschrock 	dnode_t *dn;
530*503ad85cSMatthew Ahrens 	int err = dnode_hold(os, object, FTAG, &dn);
531ea8dc4b6Seschrock 	if (err)
532ea8dc4b6Seschrock 		return (err);
533fa9e4066Sahrens 	ASSERT(offset < UINT64_MAX);
534fa9e4066Sahrens 	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
535fa9e4066Sahrens 	dnode_free_range(dn, offset, size, tx);
536fa9e4066Sahrens 	dnode_rele(dn, FTAG);
537ea8dc4b6Seschrock 	return (0);
538fa9e4066Sahrens }
539fa9e4066Sahrens 
540ea8dc4b6Seschrock int
541ea8dc4b6Seschrock dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
5427bfdf011SNeil Perrin     void *buf, uint32_t flags)
543fa9e4066Sahrens {
544fa9e4066Sahrens 	dnode_t *dn;
545fa9e4066Sahrens 	dmu_buf_t **dbp;
546c87b8fc5SMark J Musante 	int numbufs, err;
547fa9e4066Sahrens 
548*503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
549ea8dc4b6Seschrock 	if (err)
550ea8dc4b6Seschrock 		return (err);
551feb08c6bSbillm 
552feb08c6bSbillm 	/*
553feb08c6bSbillm 	 * Deal with odd block sizes, where there can't be data past the first
554feb08c6bSbillm 	 * block.  If we ever do the tail block optimization, we will need to
555feb08c6bSbillm 	 * handle that here as well.
556feb08c6bSbillm 	 */
557c87b8fc5SMark J Musante 	if (dn->dn_maxblkid == 0) {
558fa9e4066Sahrens 		int newsz = offset > dn->dn_datablksz ? 0 :
559fa9e4066Sahrens 		    MIN(size, dn->dn_datablksz - offset);
560fa9e4066Sahrens 		bzero((char *)buf + newsz, size - newsz);
561fa9e4066Sahrens 		size = newsz;
562fa9e4066Sahrens 	}
563fa9e4066Sahrens 
564fa9e4066Sahrens 	while (size > 0) {
565fa9e4066Sahrens 		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
566c87b8fc5SMark J Musante 		int i;
567fa9e4066Sahrens 
568fa9e4066Sahrens 		/*
569fa9e4066Sahrens 		 * NB: we could do this block-at-a-time, but it's nice
570fa9e4066Sahrens 		 * to be reading in parallel.
571fa9e4066Sahrens 		 */
572a2eea2e1Sahrens 		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
5737bfdf011SNeil Perrin 		    TRUE, FTAG, &numbufs, &dbp, flags);
574ea8dc4b6Seschrock 		if (err)
5751934e92fSmaybee 			break;
576fa9e4066Sahrens 
577fa9e4066Sahrens 		for (i = 0; i < numbufs; i++) {
578fa9e4066Sahrens 			int tocpy;
579fa9e4066Sahrens 			int bufoff;
580fa9e4066Sahrens 			dmu_buf_t *db = dbp[i];
581fa9e4066Sahrens 
582fa9e4066Sahrens 			ASSERT(size > 0);
583fa9e4066Sahrens 
584fa9e4066Sahrens 			bufoff = offset - db->db_offset;
585fa9e4066Sahrens 			tocpy = (int)MIN(db->db_size - bufoff, size);
586fa9e4066Sahrens 
587fa9e4066Sahrens 			bcopy((char *)db->db_data + bufoff, buf, tocpy);
588fa9e4066Sahrens 
589fa9e4066Sahrens 			offset += tocpy;
590fa9e4066Sahrens 			size -= tocpy;
591fa9e4066Sahrens 			buf = (char *)buf + tocpy;
592fa9e4066Sahrens 		}
593ea8dc4b6Seschrock 		dmu_buf_rele_array(dbp, numbufs, FTAG);
594fa9e4066Sahrens 	}
595a2eea2e1Sahrens 	dnode_rele(dn, FTAG);
5961934e92fSmaybee 	return (err);
597fa9e4066Sahrens }
598fa9e4066Sahrens 
599fa9e4066Sahrens void
600fa9e4066Sahrens dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
601fa9e4066Sahrens     const void *buf, dmu_tx_t *tx)
602fa9e4066Sahrens {
603fa9e4066Sahrens 	dmu_buf_t **dbp;
604fa9e4066Sahrens 	int numbufs, i;
605fa9e4066Sahrens 
60613506d1eSmaybee 	if (size == 0)
60713506d1eSmaybee 		return;
60813506d1eSmaybee 
609ea8dc4b6Seschrock 	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
610ea8dc4b6Seschrock 	    FALSE, FTAG, &numbufs, &dbp));
611fa9e4066Sahrens 
612fa9e4066Sahrens 	for (i = 0; i < numbufs; i++) {
613fa9e4066Sahrens 		int tocpy;
614fa9e4066Sahrens 		int bufoff;
615fa9e4066Sahrens 		dmu_buf_t *db = dbp[i];
616fa9e4066Sahrens 
617fa9e4066Sahrens 		ASSERT(size > 0);
618fa9e4066Sahrens 
619fa9e4066Sahrens 		bufoff = offset - db->db_offset;
620fa9e4066Sahrens 		tocpy = (int)MIN(db->db_size - bufoff, size);
621fa9e4066Sahrens 
622fa9e4066Sahrens 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
623fa9e4066Sahrens 
624fa9e4066Sahrens 		if (tocpy == db->db_size)
625fa9e4066Sahrens 			dmu_buf_will_fill(db, tx);
626fa9e4066Sahrens 		else
627fa9e4066Sahrens 			dmu_buf_will_dirty(db, tx);
628fa9e4066Sahrens 
629fa9e4066Sahrens 		bcopy(buf, (char *)db->db_data + bufoff, tocpy);
630fa9e4066Sahrens 
631fa9e4066Sahrens 		if (tocpy == db->db_size)
632fa9e4066Sahrens 			dmu_buf_fill_done(db, tx);
633fa9e4066Sahrens 
634fa9e4066Sahrens 		offset += tocpy;
635fa9e4066Sahrens 		size -= tocpy;
636fa9e4066Sahrens 		buf = (char *)buf + tocpy;
637fa9e4066Sahrens 	}
638ea8dc4b6Seschrock 	dmu_buf_rele_array(dbp, numbufs, FTAG);
639fa9e4066Sahrens }
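
/*
 * Sketch of the transactional pattern a dmu_write() caller follows (error
 * handling abbreviated).  The range being written must be covered by a
 * dmu_tx_hold_write() before the tx is assigned:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 *
 * dmu_read(), by contrast, needs no transaction.
 */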
640fa9e4066Sahrens 
64182c9918fSTim Haley void
64282c9918fSTim Haley dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
64382c9918fSTim Haley     dmu_tx_t *tx)
64482c9918fSTim Haley {
64582c9918fSTim Haley 	dmu_buf_t **dbp;
64682c9918fSTim Haley 	int numbufs, i;
64782c9918fSTim Haley 
64882c9918fSTim Haley 	if (size == 0)
64982c9918fSTim Haley 		return;
65082c9918fSTim Haley 
65182c9918fSTim Haley 	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
65282c9918fSTim Haley 	    FALSE, FTAG, &numbufs, &dbp));
65382c9918fSTim Haley 
65482c9918fSTim Haley 	for (i = 0; i < numbufs; i++) {
65582c9918fSTim Haley 		dmu_buf_t *db = dbp[i];
65682c9918fSTim Haley 
65782c9918fSTim Haley 		dmu_buf_will_not_fill(db, tx);
65882c9918fSTim Haley 	}
65982c9918fSTim Haley 	dmu_buf_rele_array(dbp, numbufs, FTAG);
66082c9918fSTim Haley }
66182c9918fSTim Haley 
662fa9e4066Sahrens #ifdef _KERNEL
663fa9e4066Sahrens int
664feb08c6bSbillm dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
665feb08c6bSbillm {
666feb08c6bSbillm 	dmu_buf_t **dbp;
667feb08c6bSbillm 	int numbufs, i, err;
668feb08c6bSbillm 
669feb08c6bSbillm 	/*
670feb08c6bSbillm 	 * NB: we could do this block-at-a-time, but it's nice
671feb08c6bSbillm 	 * to be reading in parallel.
672feb08c6bSbillm 	 */
673feb08c6bSbillm 	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
674feb08c6bSbillm 	    &numbufs, &dbp);
675feb08c6bSbillm 	if (err)
676feb08c6bSbillm 		return (err);
677feb08c6bSbillm 
678feb08c6bSbillm 	for (i = 0; i < numbufs; i++) {
679feb08c6bSbillm 		int tocpy;
680feb08c6bSbillm 		int bufoff;
681feb08c6bSbillm 		dmu_buf_t *db = dbp[i];
682feb08c6bSbillm 
683feb08c6bSbillm 		ASSERT(size > 0);
684feb08c6bSbillm 
685feb08c6bSbillm 		bufoff = uio->uio_loffset - db->db_offset;
686feb08c6bSbillm 		tocpy = (int)MIN(db->db_size - bufoff, size);
687feb08c6bSbillm 
688feb08c6bSbillm 		err = uiomove((char *)db->db_data + bufoff, tocpy,
689feb08c6bSbillm 		    UIO_READ, uio);
690feb08c6bSbillm 		if (err)
691feb08c6bSbillm 			break;
692feb08c6bSbillm 
693feb08c6bSbillm 		size -= tocpy;
694feb08c6bSbillm 	}
695feb08c6bSbillm 	dmu_buf_rele_array(dbp, numbufs, FTAG);
696feb08c6bSbillm 
697feb08c6bSbillm 	return (err);
698feb08c6bSbillm }
699feb08c6bSbillm 
700feb08c6bSbillm int
701feb08c6bSbillm dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
702feb08c6bSbillm     dmu_tx_t *tx)
703fa9e4066Sahrens {
704fa9e4066Sahrens 	dmu_buf_t **dbp;
705fa9e4066Sahrens 	int numbufs, i;
706fa9e4066Sahrens 	int err = 0;
707fa9e4066Sahrens 
70813506d1eSmaybee 	if (size == 0)
70913506d1eSmaybee 		return (0);
71013506d1eSmaybee 
711feb08c6bSbillm 	err = dmu_buf_hold_array(os, object, uio->uio_loffset, size,
712ea8dc4b6Seschrock 	    FALSE, FTAG, &numbufs, &dbp);
713ea8dc4b6Seschrock 	if (err)
714ea8dc4b6Seschrock 		return (err);
715fa9e4066Sahrens 
716fa9e4066Sahrens 	for (i = 0; i < numbufs; i++) {
717fa9e4066Sahrens 		int tocpy;
718fa9e4066Sahrens 		int bufoff;
719fa9e4066Sahrens 		dmu_buf_t *db = dbp[i];
720fa9e4066Sahrens 
721fa9e4066Sahrens 		ASSERT(size > 0);
722fa9e4066Sahrens 
723feb08c6bSbillm 		bufoff = uio->uio_loffset - db->db_offset;
724fa9e4066Sahrens 		tocpy = (int)MIN(db->db_size - bufoff, size);
725fa9e4066Sahrens 
726fa9e4066Sahrens 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
727fa9e4066Sahrens 
728fa9e4066Sahrens 		if (tocpy == db->db_size)
729fa9e4066Sahrens 			dmu_buf_will_fill(db, tx);
730fa9e4066Sahrens 		else
731fa9e4066Sahrens 			dmu_buf_will_dirty(db, tx);
732fa9e4066Sahrens 
733fa9e4066Sahrens 		/*
734fa9e4066Sahrens 		 * XXX uiomove could block forever (eg. nfs-backed
735fa9e4066Sahrens 		 * pages).  There needs to be a uiolockdown() function
736fa9e4066Sahrens 		 * to lock the pages in memory, so that uiomove won't
737fa9e4066Sahrens 		 * block.
738fa9e4066Sahrens 		 */
739fa9e4066Sahrens 		err = uiomove((char *)db->db_data + bufoff, tocpy,
740fa9e4066Sahrens 		    UIO_WRITE, uio);
741fa9e4066Sahrens 
742fa9e4066Sahrens 		if (tocpy == db->db_size)
743fa9e4066Sahrens 			dmu_buf_fill_done(db, tx);
744fa9e4066Sahrens 
745fa9e4066Sahrens 		if (err)
746fa9e4066Sahrens 			break;
747fa9e4066Sahrens 
748fa9e4066Sahrens 		size -= tocpy;
749fa9e4066Sahrens 	}
750ea8dc4b6Seschrock 	dmu_buf_rele_array(dbp, numbufs, FTAG);
751fa9e4066Sahrens 	return (err);
752fa9e4066Sahrens }
75344eda4d7Smaybee 
75444eda4d7Smaybee int
75544eda4d7Smaybee dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
75644eda4d7Smaybee     page_t *pp, dmu_tx_t *tx)
75744eda4d7Smaybee {
75844eda4d7Smaybee 	dmu_buf_t **dbp;
75944eda4d7Smaybee 	int numbufs, i;
76044eda4d7Smaybee 	int err;
76144eda4d7Smaybee 
76244eda4d7Smaybee 	if (size == 0)
76344eda4d7Smaybee 		return (0);
76444eda4d7Smaybee 
76544eda4d7Smaybee 	err = dmu_buf_hold_array(os, object, offset, size,
76644eda4d7Smaybee 	    FALSE, FTAG, &numbufs, &dbp);
76744eda4d7Smaybee 	if (err)
76844eda4d7Smaybee 		return (err);
76944eda4d7Smaybee 
77044eda4d7Smaybee 	for (i = 0; i < numbufs; i++) {
77144eda4d7Smaybee 		int tocpy, copied, thiscpy;
77244eda4d7Smaybee 		int bufoff;
77344eda4d7Smaybee 		dmu_buf_t *db = dbp[i];
77444eda4d7Smaybee 		caddr_t va;
77544eda4d7Smaybee 
77644eda4d7Smaybee 		ASSERT(size > 0);
77744eda4d7Smaybee 		ASSERT3U(db->db_size, >=, PAGESIZE);
77844eda4d7Smaybee 
77944eda4d7Smaybee 		bufoff = offset - db->db_offset;
78044eda4d7Smaybee 		tocpy = (int)MIN(db->db_size - bufoff, size);
78144eda4d7Smaybee 
78244eda4d7Smaybee 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
78344eda4d7Smaybee 
78444eda4d7Smaybee 		if (tocpy == db->db_size)
78544eda4d7Smaybee 			dmu_buf_will_fill(db, tx);
78644eda4d7Smaybee 		else
78744eda4d7Smaybee 			dmu_buf_will_dirty(db, tx);
78844eda4d7Smaybee 
78944eda4d7Smaybee 		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
79044eda4d7Smaybee 			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
79144eda4d7Smaybee 			thiscpy = MIN(PAGESIZE, tocpy - copied);
7920fab61baSJonathan W Adams 			va = zfs_map_page(pp, S_READ);
79344eda4d7Smaybee 			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
7940fab61baSJonathan W Adams 			zfs_unmap_page(pp, va);
79544eda4d7Smaybee 			pp = pp->p_next;
79644eda4d7Smaybee 			bufoff += PAGESIZE;
79744eda4d7Smaybee 		}
79844eda4d7Smaybee 
79944eda4d7Smaybee 		if (tocpy == db->db_size)
80044eda4d7Smaybee 			dmu_buf_fill_done(db, tx);
80144eda4d7Smaybee 
80244eda4d7Smaybee 		offset += tocpy;
80344eda4d7Smaybee 		size -= tocpy;
80444eda4d7Smaybee 	}
80544eda4d7Smaybee 	dmu_buf_rele_array(dbp, numbufs, FTAG);
80644eda4d7Smaybee 	return (err);
80744eda4d7Smaybee }
808fa9e4066Sahrens #endif
809fa9e4066Sahrens 
8102fdbea25SAleksandr Guzovskiy /*
8112fdbea25SAleksandr Guzovskiy  * Allocate a loaned anonymous arc buffer.
8122fdbea25SAleksandr Guzovskiy  */
8132fdbea25SAleksandr Guzovskiy arc_buf_t *
8142fdbea25SAleksandr Guzovskiy dmu_request_arcbuf(dmu_buf_t *handle, int size)
8152fdbea25SAleksandr Guzovskiy {
8162fdbea25SAleksandr Guzovskiy 	dnode_t *dn = ((dmu_buf_impl_t *)handle)->db_dnode;
8172fdbea25SAleksandr Guzovskiy 
8182fdbea25SAleksandr Guzovskiy 	return (arc_loan_buf(dn->dn_objset->os_spa, size));
8192fdbea25SAleksandr Guzovskiy }
8202fdbea25SAleksandr Guzovskiy 
8212fdbea25SAleksandr Guzovskiy /*
8222fdbea25SAleksandr Guzovskiy  * Free a loaned arc buffer.
8232fdbea25SAleksandr Guzovskiy  */
8242fdbea25SAleksandr Guzovskiy void
8252fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(arc_buf_t *buf)
8262fdbea25SAleksandr Guzovskiy {
8272fdbea25SAleksandr Guzovskiy 	arc_return_buf(buf, FTAG);
8282fdbea25SAleksandr Guzovskiy 	VERIFY(arc_buf_remove_ref(buf, FTAG) == 1);
8292fdbea25SAleksandr Guzovskiy }
8302fdbea25SAleksandr Guzovskiy 
8312fdbea25SAleksandr Guzovskiy /*
8322fdbea25SAleksandr Guzovskiy  * When possible, directly assign the passed loaned arc buffer to a dbuf.
8332fdbea25SAleksandr Guzovskiy  * If this is not possible, copy the contents of the passed arc buf via
8342fdbea25SAleksandr Guzovskiy  * dmu_write().
8352fdbea25SAleksandr Guzovskiy  */
8362fdbea25SAleksandr Guzovskiy void
8372fdbea25SAleksandr Guzovskiy dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
8382fdbea25SAleksandr Guzovskiy     dmu_tx_t *tx)
8392fdbea25SAleksandr Guzovskiy {
8402fdbea25SAleksandr Guzovskiy 	dnode_t *dn = ((dmu_buf_impl_t *)handle)->db_dnode;
8412fdbea25SAleksandr Guzovskiy 	dmu_buf_impl_t *db;
8422fdbea25SAleksandr Guzovskiy 	uint32_t blksz = (uint32_t)arc_buf_size(buf);
8432fdbea25SAleksandr Guzovskiy 	uint64_t blkid;
8442fdbea25SAleksandr Guzovskiy 
8452fdbea25SAleksandr Guzovskiy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
8462fdbea25SAleksandr Guzovskiy 	blkid = dbuf_whichblock(dn, offset);
8472fdbea25SAleksandr Guzovskiy 	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
8482fdbea25SAleksandr Guzovskiy 	rw_exit(&dn->dn_struct_rwlock);
8492fdbea25SAleksandr Guzovskiy 
8502fdbea25SAleksandr Guzovskiy 	if (offset == db->db.db_offset && blksz == db->db.db_size) {
8512fdbea25SAleksandr Guzovskiy 		dbuf_assign_arcbuf(db, buf, tx);
8522fdbea25SAleksandr Guzovskiy 		dbuf_rele(db, FTAG);
8532fdbea25SAleksandr Guzovskiy 	} else {
8542fdbea25SAleksandr Guzovskiy 		dbuf_rele(db, FTAG);
855*503ad85cSMatthew Ahrens 		dmu_write(dn->dn_objset, dn->dn_object, offset, blksz,
8562fdbea25SAleksandr Guzovskiy 		    buf->b_data, tx);
8572fdbea25SAleksandr Guzovskiy 		dmu_return_arcbuf(buf);
8582fdbea25SAleksandr Guzovskiy 	}
8592fdbea25SAleksandr Guzovskiy }
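
/*
 * Sketch of the loaned-buffer write path the three functions above provide,
 * for a full-block write (db here is any held dbuf of the target object,
 * e.g. its bonus buffer):
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db, blksz);
 *	... copy the caller's data into abuf->b_data ...
 *	dmu_assign_arcbuf(db, offset, abuf, tx);
 *
 * When offset/size do not line up with a whole block, dmu_assign_arcbuf()
 * falls back to dmu_write() and returns the loan itself; a caller that
 * bails out before assigning must call dmu_return_arcbuf() on its own.
 */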
8602fdbea25SAleksandr Guzovskiy 
861c5c6ffa0Smaybee typedef struct {
862c717a561Smaybee 	dbuf_dirty_record_t	*dr;
863c717a561Smaybee 	dmu_sync_cb_t		*done;
864c717a561Smaybee 	void			*arg;
865c717a561Smaybee } dmu_sync_arg_t;
866c5c6ffa0Smaybee 
867e14bb325SJeff Bonwick /* ARGSUSED */
868e14bb325SJeff Bonwick static void
869e14bb325SJeff Bonwick dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
870e14bb325SJeff Bonwick {
871e14bb325SJeff Bonwick 	blkptr_t *bp = zio->io_bp;
872e14bb325SJeff Bonwick 
873e14bb325SJeff Bonwick 	if (!BP_IS_HOLE(bp)) {
874e14bb325SJeff Bonwick 		dmu_sync_arg_t *in = varg;
875e14bb325SJeff Bonwick 		dbuf_dirty_record_t *dr = in->dr;
876e14bb325SJeff Bonwick 		dmu_buf_impl_t *db = dr->dr_dbuf;
877e14bb325SJeff Bonwick 		ASSERT(BP_GET_TYPE(bp) == db->db_dnode->dn_type);
878e14bb325SJeff Bonwick 		ASSERT(BP_GET_LEVEL(bp) == 0);
879e14bb325SJeff Bonwick 		bp->blk_fill = 1;
880e14bb325SJeff Bonwick 	}
881e14bb325SJeff Bonwick }
882e14bb325SJeff Bonwick 
883c5c6ffa0Smaybee /* ARGSUSED */
884c5c6ffa0Smaybee static void
885c5c6ffa0Smaybee dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
886c5c6ffa0Smaybee {
887c717a561Smaybee 	dmu_sync_arg_t *in = varg;
888c717a561Smaybee 	dbuf_dirty_record_t *dr = in->dr;
889c717a561Smaybee 	dmu_buf_impl_t *db = dr->dr_dbuf;
890c5c6ffa0Smaybee 	dmu_sync_cb_t *done = in->done;
891c5c6ffa0Smaybee 
892c5c6ffa0Smaybee 	mutex_enter(&db->db_mtx);
893c717a561Smaybee 	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
894c717a561Smaybee 	dr->dt.dl.dr_overridden_by = *zio->io_bp; /* structure assignment */
895c717a561Smaybee 	dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
896c5c6ffa0Smaybee 	cv_broadcast(&db->db_changed);
897c5c6ffa0Smaybee 	mutex_exit(&db->db_mtx);
898c5c6ffa0Smaybee 
899c5c6ffa0Smaybee 	if (done)
900c717a561Smaybee 		done(&(db->db), in->arg);
901c717a561Smaybee 
902c717a561Smaybee 	kmem_free(in, sizeof (dmu_sync_arg_t));
903c5c6ffa0Smaybee }
904c5c6ffa0Smaybee 
905fa9e4066Sahrens /*
906c5c6ffa0Smaybee  * Intent log support: sync the block associated with db to disk.
907c5c6ffa0Smaybee  * N.B. and XXX: the caller is responsible for making sure that the
908c5c6ffa0Smaybee  * data isn't changing while dmu_sync() is writing it.
909fa9e4066Sahrens  *
910fa9e4066Sahrens  * Return values:
911fa9e4066Sahrens  *
912c5c6ffa0Smaybee  *	EEXIST: this txg has already been synced, so there's nothing to do.
913fa9e4066Sahrens  *		The caller should not log the write.
914fa9e4066Sahrens  *
915fa9e4066Sahrens  *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
916fa9e4066Sahrens  *		The caller should not log the write.
917fa9e4066Sahrens  *
918c5c6ffa0Smaybee  *	EALREADY: this block is already in the process of being synced.
919c5c6ffa0Smaybee  *		The caller should track its progress (somehow).
920fa9e4066Sahrens  *
921c5c6ffa0Smaybee  *	EINPROGRESS: the IO has been initiated.
922c5c6ffa0Smaybee  *		The caller should log this blkptr in the callback.
923fa9e4066Sahrens  *
924c5c6ffa0Smaybee  *	0: completed.  Sets *bp to the blkptr just written.
925c5c6ffa0Smaybee  *		The caller should log this blkptr immediately.
926fa9e4066Sahrens  */
927fa9e4066Sahrens int
928c5c6ffa0Smaybee dmu_sync(zio_t *pio, dmu_buf_t *db_fake,
929c5c6ffa0Smaybee     blkptr_t *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg)
930fa9e4066Sahrens {
931c5c6ffa0Smaybee 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
932*503ad85cSMatthew Ahrens 	objset_t *os = db->db_objset;
933c5c6ffa0Smaybee 	dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
934fa9e4066Sahrens 	tx_state_t *tx = &dp->dp_tx;
935c717a561Smaybee 	dbuf_dirty_record_t *dr;
936c717a561Smaybee 	dmu_sync_arg_t *in;
937ea8dc4b6Seschrock 	zbookmark_t zb;
938088f3894Sahrens 	writeprops_t wp = { 0 };
939c717a561Smaybee 	zio_t *zio;
940c5c6ffa0Smaybee 	int err;
941fa9e4066Sahrens 
942fa9e4066Sahrens 	ASSERT(BP_IS_HOLE(bp));
943fa9e4066Sahrens 	ASSERT(txg != 0);
944fa9e4066Sahrens 
945fa9e4066Sahrens 	dprintf("dmu_sync txg=%llu, s,o,q %llu %llu %llu\n",
946fa9e4066Sahrens 	    txg, tx->tx_synced_txg, tx->tx_open_txg, tx->tx_quiesced_txg);
947fa9e4066Sahrens 
948ea8dc4b6Seschrock 	/*
949c5c6ffa0Smaybee 	 * XXX - would be nice if we could do this without suspending...
950ea8dc4b6Seschrock 	 */
951c5c6ffa0Smaybee 	txg_suspend(dp);
952ea8dc4b6Seschrock 
953fa9e4066Sahrens 	/*
954fa9e4066Sahrens 	 * If this txg already synced, there's nothing to do.
955fa9e4066Sahrens 	 */
956fa9e4066Sahrens 	if (txg <= tx->tx_synced_txg) {
957c5c6ffa0Smaybee 		txg_resume(dp);
958fa9e4066Sahrens 		/*
959fa9e4066Sahrens 		 * If we're running ziltest, we need the blkptr regardless.
960fa9e4066Sahrens 		 */
961fa9e4066Sahrens 		if (txg > spa_freeze_txg(dp->dp_spa)) {
962fa9e4066Sahrens 			/* if db_blkptr == NULL, this was an empty write */
963fa9e4066Sahrens 			if (db->db_blkptr)
964fa9e4066Sahrens 				*bp = *db->db_blkptr; /* structure assignment */
965fa9e4066Sahrens 			return (0);
966fa9e4066Sahrens 		}
967c5c6ffa0Smaybee 		return (EEXIST);
968fa9e4066Sahrens 	}
969fa9e4066Sahrens 
970fa9e4066Sahrens 	mutex_enter(&db->db_mtx);
971fa9e4066Sahrens 
972c5c6ffa0Smaybee 	if (txg == tx->tx_syncing_txg) {
973c5c6ffa0Smaybee 		while (db->db_data_pending) {
974c5c6ffa0Smaybee 			/*
975c5c6ffa0Smaybee 			 * IO is in-progress.  Wait for it to finish.
976c5c6ffa0Smaybee 			 * XXX - would be nice to be able to somehow "attach"
977c5c6ffa0Smaybee 			 * this zio to the parent zio passed in.
978c5c6ffa0Smaybee 			 */
979c5c6ffa0Smaybee 			cv_wait(&db->db_changed, &db->db_mtx);
98013506d1eSmaybee 			if (!db->db_data_pending &&
98113506d1eSmaybee 			    db->db_blkptr && BP_IS_HOLE(db->db_blkptr)) {
98213506d1eSmaybee 				/*
98313506d1eSmaybee 				 * IO was compressed away
98413506d1eSmaybee 				 */
98513506d1eSmaybee 				*bp = *db->db_blkptr; /* structure assignment */
98613506d1eSmaybee 				mutex_exit(&db->db_mtx);
98713506d1eSmaybee 				txg_resume(dp);
98813506d1eSmaybee 				return (0);
98913506d1eSmaybee 			}
990c5c6ffa0Smaybee 			ASSERT(db->db_data_pending ||
991c5c6ffa0Smaybee 			    (db->db_blkptr && db->db_blkptr->blk_birth == txg));
992c5c6ffa0Smaybee 		}
993fa9e4066Sahrens 
994c5c6ffa0Smaybee 		if (db->db_blkptr && db->db_blkptr->blk_birth == txg) {
995c5c6ffa0Smaybee 			/*
996c5c6ffa0Smaybee 			 * IO is already completed.
997c5c6ffa0Smaybee 			 */
998c5c6ffa0Smaybee 			*bp = *db->db_blkptr; /* structure assignment */
999fa9e4066Sahrens 			mutex_exit(&db->db_mtx);
1000c5c6ffa0Smaybee 			txg_resume(dp);
1001c5c6ffa0Smaybee 			return (0);
1002fa9e4066Sahrens 		}
1003fa9e4066Sahrens 	}
1004fa9e4066Sahrens 
1005c717a561Smaybee 	dr = db->db_last_dirty;
1006c717a561Smaybee 	while (dr && dr->dr_txg > txg)
1007c717a561Smaybee 		dr = dr->dr_next;
1008c717a561Smaybee 	if (dr == NULL || dr->dr_txg < txg) {
1009c5c6ffa0Smaybee 		/*
1010c5c6ffa0Smaybee 		 * This dbuf isn't dirty, must have been free_range'd.
1011c5c6ffa0Smaybee 		 * There's no need to log writes to freed blocks, so we're done.
1012c5c6ffa0Smaybee 		 */
1013c5c6ffa0Smaybee 		mutex_exit(&db->db_mtx);
1014c5c6ffa0Smaybee 		txg_resume(dp);
1015c5c6ffa0Smaybee 		return (ENOENT);
1016c5c6ffa0Smaybee 	}
1017c5c6ffa0Smaybee 
1018c717a561Smaybee 	ASSERT(dr->dr_txg == txg);
1019c717a561Smaybee 	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
1020c717a561Smaybee 		/*
1021c717a561Smaybee 		 * We have already issued a sync write for this buffer.
1022c717a561Smaybee 		 */
1023c717a561Smaybee 		mutex_exit(&db->db_mtx);
1024c717a561Smaybee 		txg_resume(dp);
1025c717a561Smaybee 		return (EALREADY);
1026c717a561Smaybee 	} else if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1027c717a561Smaybee 		/*
1028c717a561Smaybee 		 * This buffer has already been synced.  It could not
1029c717a561Smaybee 		 * have been dirtied since, or we would have cleared the state.
1030c717a561Smaybee 		 */
1031c717a561Smaybee 		*bp = dr->dt.dl.dr_overridden_by; /* structure assignment */
1032c717a561Smaybee 		mutex_exit(&db->db_mtx);
1033c717a561Smaybee 		txg_resume(dp);
1034c717a561Smaybee 		return (0);
1035c717a561Smaybee 	}
1036c717a561Smaybee 
1037c717a561Smaybee 	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1038c717a561Smaybee 	in = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1039c717a561Smaybee 	in->dr = dr;
1040c5c6ffa0Smaybee 	in->done = done;
1041c5c6ffa0Smaybee 	in->arg = arg;
1042fa9e4066Sahrens 	mutex_exit(&db->db_mtx);
1043c5c6ffa0Smaybee 	txg_resume(dp);
1044fa9e4066Sahrens 
1045c5c6ffa0Smaybee 	zb.zb_objset = os->os_dsl_dataset->ds_object;
1046ea8dc4b6Seschrock 	zb.zb_object = db->db.db_object;
1047ea8dc4b6Seschrock 	zb.zb_level = db->db_level;
1048ea8dc4b6Seschrock 	zb.zb_blkid = db->db_blkid;
1049e14bb325SJeff Bonwick 
1050088f3894Sahrens 	wp.wp_type = db->db_dnode->dn_type;
1051088f3894Sahrens 	wp.wp_level = db->db_level;
1052e14bb325SJeff Bonwick 	wp.wp_copies = os->os_copies;
1053088f3894Sahrens 	wp.wp_dnchecksum = db->db_dnode->dn_checksum;
1054088f3894Sahrens 	wp.wp_oschecksum = os->os_checksum;
1055088f3894Sahrens 	wp.wp_dncompress = db->db_dnode->dn_compress;
1056088f3894Sahrens 	wp.wp_oscompress = os->os_compress;
1057e14bb325SJeff Bonwick 
1058e14bb325SJeff Bonwick 	ASSERT(BP_IS_HOLE(bp));
1059e14bb325SJeff Bonwick 
1060e14bb325SJeff Bonwick 	zio = arc_write(pio, os->os_spa, &wp, DBUF_IS_L2CACHEABLE(db),
1061e14bb325SJeff Bonwick 	    txg, bp, dr->dt.dl.dr_data, dmu_sync_ready, dmu_sync_done, in,
1062e14bb325SJeff Bonwick 	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1063c717a561Smaybee 	if (pio) {
1064c717a561Smaybee 		zio_nowait(zio);
1065c717a561Smaybee 		err = EINPROGRESS;
1066c717a561Smaybee 	} else {
1067c717a561Smaybee 		err = zio_wait(zio);
1068c717a561Smaybee 		ASSERT(err == 0);
1069c717a561Smaybee 	}
1070c717a561Smaybee 	return (err);
1071fa9e4066Sahrens }
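
/*
 * Sketch of how an intent-log caller dispatches on the dmu_sync() return
 * values documented above (illustrative; the names are placeholders):
 *
 *	error = dmu_sync(pio, db, bp, txg, done_cb, arg);
 *	switch (error) {
 *	case 0:		   log *bp immediately
 *	case EINPROGRESS:  log the blkptr from done_cb when the write completes
 *	case EALREADY:	   an earlier dmu_sync() is in flight; track it
 *	case EEXIST:	   txg already synced -- do not log the write
 *	case ENOENT:	   block was freed -- do not log the write
 *	}
 */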
1072fa9e4066Sahrens 
1073fa9e4066Sahrens int
1074fa9e4066Sahrens dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1075fa9e4066Sahrens 	dmu_tx_t *tx)
1076fa9e4066Sahrens {
1077ea8dc4b6Seschrock 	dnode_t *dn;
1078ea8dc4b6Seschrock 	int err;
1079ea8dc4b6Seschrock 
1080*503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
1081ea8dc4b6Seschrock 	if (err)
1082ea8dc4b6Seschrock 		return (err);
1083ea8dc4b6Seschrock 	err = dnode_set_blksz(dn, size, ibs, tx);
1084fa9e4066Sahrens 	dnode_rele(dn, FTAG);
1085fa9e4066Sahrens 	return (err);
1086fa9e4066Sahrens }
1087fa9e4066Sahrens 
1088fa9e4066Sahrens void
1089fa9e4066Sahrens dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
1090fa9e4066Sahrens 	dmu_tx_t *tx)
1091fa9e4066Sahrens {
1092ea8dc4b6Seschrock 	dnode_t *dn;
1093ea8dc4b6Seschrock 
1094ea8dc4b6Seschrock 	/* XXX assumes dnode_hold will not get an i/o error */
1095*503ad85cSMatthew Ahrens 	(void) dnode_hold(os, object, FTAG, &dn);
1096fa9e4066Sahrens 	ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
1097fa9e4066Sahrens 	dn->dn_checksum = checksum;
1098fa9e4066Sahrens 	dnode_setdirty(dn, tx);
1099fa9e4066Sahrens 	dnode_rele(dn, FTAG);
1100fa9e4066Sahrens }
1101fa9e4066Sahrens 
1102fa9e4066Sahrens void
1103fa9e4066Sahrens dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
1104fa9e4066Sahrens 	dmu_tx_t *tx)
1105fa9e4066Sahrens {
1106ea8dc4b6Seschrock 	dnode_t *dn;
1107ea8dc4b6Seschrock 
1108ea8dc4b6Seschrock 	/* XXX assumes dnode_hold will not get an i/o error */
1109*503ad85cSMatthew Ahrens 	(void) dnode_hold(os, object, FTAG, &dn);
1110fa9e4066Sahrens 	ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
1111fa9e4066Sahrens 	dn->dn_compress = compress;
1112fa9e4066Sahrens 	dnode_setdirty(dn, tx);
1113fa9e4066Sahrens 	dnode_rele(dn, FTAG);
1114fa9e4066Sahrens }
1115fa9e4066Sahrens 
1116fa9e4066Sahrens int
1117fa9e4066Sahrens dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
1118fa9e4066Sahrens {
1119fa9e4066Sahrens 	dnode_t *dn;
1120fa9e4066Sahrens 	int i, err;
1121fa9e4066Sahrens 
1122*503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
1123ea8dc4b6Seschrock 	if (err)
1124ea8dc4b6Seschrock 		return (err);
1125fa9e4066Sahrens 	/*
1126fa9e4066Sahrens 	 * Sync any current changes before
1127fa9e4066Sahrens 	 * we go trundling through the block pointers.
1128fa9e4066Sahrens 	 */
1129fa9e4066Sahrens 	for (i = 0; i < TXG_SIZE; i++) {
1130c543ec06Sahrens 		if (list_link_active(&dn->dn_dirty_link[i]))
1131fa9e4066Sahrens 			break;
1132fa9e4066Sahrens 	}
1133fa9e4066Sahrens 	if (i != TXG_SIZE) {
1134fa9e4066Sahrens 		dnode_rele(dn, FTAG);
1135fa9e4066Sahrens 		txg_wait_synced(dmu_objset_pool(os), 0);
1136*503ad85cSMatthew Ahrens 		err = dnode_hold(os, object, FTAG, &dn);
1137ea8dc4b6Seschrock 		if (err)
1138ea8dc4b6Seschrock 			return (err);
1139fa9e4066Sahrens 	}
1140fa9e4066Sahrens 
1141cdb0ab79Smaybee 	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
1142fa9e4066Sahrens 	dnode_rele(dn, FTAG);
1143fa9e4066Sahrens 
1144fa9e4066Sahrens 	return (err);
1145fa9e4066Sahrens }
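
/*
 * Example (sketch): dmu_offset_next() backs SEEK_HOLE/SEEK_DATA style
 * lookups.  To find the first hole at or after "start":
 *
 *	uint64_t off = start;
 *	err = dmu_offset_next(os, object, B_TRUE, &off);
 *
 * A caller typically treats ESRCH as "no hole before EOF"; passing B_FALSE
 * searches for the next data block instead.
 */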
1146fa9e4066Sahrens 
1147fa9e4066Sahrens void
1148fa9e4066Sahrens dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
1149fa9e4066Sahrens {
1150fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1151fa9e4066Sahrens 	mutex_enter(&dn->dn_mtx);
1152fa9e4066Sahrens 
1153fa9e4066Sahrens 	doi->doi_data_block_size = dn->dn_datablksz;
1154fa9e4066Sahrens 	doi->doi_metadata_block_size = dn->dn_indblkshift ?
1155fa9e4066Sahrens 	    1ULL << dn->dn_indblkshift : 0;
1156fa9e4066Sahrens 	doi->doi_indirection = dn->dn_nlevels;
1157fa9e4066Sahrens 	doi->doi_checksum = dn->dn_checksum;
1158fa9e4066Sahrens 	doi->doi_compress = dn->dn_compress;
115999653d4eSeschrock 	doi->doi_physical_blks = (DN_USED_BYTES(dn->dn_phys) +
116099653d4eSeschrock 	    SPA_MINBLOCKSIZE/2) >> SPA_MINBLOCKSHIFT;
1161fa9e4066Sahrens 	doi->doi_max_block_offset = dn->dn_phys->dn_maxblkid;
1162fa9e4066Sahrens 	doi->doi_type = dn->dn_type;
1163fa9e4066Sahrens 	doi->doi_bonus_size = dn->dn_bonuslen;
1164fa9e4066Sahrens 	doi->doi_bonus_type = dn->dn_bonustype;
1165fa9e4066Sahrens 
1166fa9e4066Sahrens 	mutex_exit(&dn->dn_mtx);
1167fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
1168fa9e4066Sahrens }
1169fa9e4066Sahrens 
1170fa9e4066Sahrens /*
1171fa9e4066Sahrens  * Get information on a DMU object.
1172fa9e4066Sahrens  * If doi is NULL, just indicates whether the object exists.
1173fa9e4066Sahrens  */
1174fa9e4066Sahrens int
1175fa9e4066Sahrens dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
1176fa9e4066Sahrens {
1177ea8dc4b6Seschrock 	dnode_t *dn;
1178*503ad85cSMatthew Ahrens 	int err = dnode_hold(os, object, FTAG, &dn);
1179fa9e4066Sahrens 
1180ea8dc4b6Seschrock 	if (err)
1181ea8dc4b6Seschrock 		return (err);
1182fa9e4066Sahrens 
1183fa9e4066Sahrens 	if (doi != NULL)
1184fa9e4066Sahrens 		dmu_object_info_from_dnode(dn, doi);
1185fa9e4066Sahrens 
1186fa9e4066Sahrens 	dnode_rele(dn, FTAG);
1187fa9e4066Sahrens 	return (0);
1188fa9e4066Sahrens }
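
/*
 * Example (sketch): probing for existence, or fetching the data block size:
 *
 *	dmu_object_info_t doi;
 *	if (dmu_object_info(os, object, NULL) == 0)
 *		... object exists ...
 *	if (dmu_object_info(os, object, &doi) == 0)
 *		blksz = doi.doi_data_block_size;
 */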
1189fa9e4066Sahrens 
1190fa9e4066Sahrens /*
1191fa9e4066Sahrens  * As above, but faster; can be used when you have a held dbuf in hand.
1192fa9e4066Sahrens  */
1193fa9e4066Sahrens void
1194fa9e4066Sahrens dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
1195fa9e4066Sahrens {
1196fa9e4066Sahrens 	dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
1197fa9e4066Sahrens }
1198fa9e4066Sahrens 
1199fa9e4066Sahrens /*
1200fa9e4066Sahrens  * Faster still when you only care about the size.
1201fa9e4066Sahrens  * This is specifically optimized for zfs_getattr().
1202fa9e4066Sahrens  */
1203fa9e4066Sahrens void
1204fa9e4066Sahrens dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512)
1205fa9e4066Sahrens {
1206fa9e4066Sahrens 	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
1207fa9e4066Sahrens 
1208fa9e4066Sahrens 	*blksize = dn->dn_datablksz;
120999653d4eSeschrock 	/* add 1 for dnode space */
121099653d4eSeschrock 	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
121199653d4eSeschrock 	    SPA_MINBLOCKSHIFT) + 1;
1212fa9e4066Sahrens }
1213fa9e4066Sahrens 
1214fa9e4066Sahrens void
1215fa9e4066Sahrens byteswap_uint64_array(void *vbuf, size_t size)
1216fa9e4066Sahrens {
1217fa9e4066Sahrens 	uint64_t *buf = vbuf;
1218fa9e4066Sahrens 	size_t count = size >> 3;
1219fa9e4066Sahrens 	int i;
1220fa9e4066Sahrens 
1221fa9e4066Sahrens 	ASSERT((size & 7) == 0);
1222fa9e4066Sahrens 
1223fa9e4066Sahrens 	for (i = 0; i < count; i++)
1224fa9e4066Sahrens 		buf[i] = BSWAP_64(buf[i]);
1225fa9e4066Sahrens }
1226fa9e4066Sahrens 
1227fa9e4066Sahrens void
1228fa9e4066Sahrens byteswap_uint32_array(void *vbuf, size_t size)
1229fa9e4066Sahrens {
1230fa9e4066Sahrens 	uint32_t *buf = vbuf;
1231fa9e4066Sahrens 	size_t count = size >> 2;
1232fa9e4066Sahrens 	int i;
1233fa9e4066Sahrens 
1234fa9e4066Sahrens 	ASSERT((size & 3) == 0);
1235fa9e4066Sahrens 
1236fa9e4066Sahrens 	for (i = 0; i < count; i++)
1237fa9e4066Sahrens 		buf[i] = BSWAP_32(buf[i]);
1238fa9e4066Sahrens }
1239fa9e4066Sahrens 
1240fa9e4066Sahrens void
1241fa9e4066Sahrens byteswap_uint16_array(void *vbuf, size_t size)
1242fa9e4066Sahrens {
1243fa9e4066Sahrens 	uint16_t *buf = vbuf;
1244fa9e4066Sahrens 	size_t count = size >> 1;
1245fa9e4066Sahrens 	int i;
1246fa9e4066Sahrens 
1247fa9e4066Sahrens 	ASSERT((size & 1) == 0);
1248fa9e4066Sahrens 
1249fa9e4066Sahrens 	for (i = 0; i < count; i++)
1250fa9e4066Sahrens 		buf[i] = BSWAP_16(buf[i]);
1251fa9e4066Sahrens }
1252fa9e4066Sahrens 
1253fa9e4066Sahrens /* ARGSUSED */
1254fa9e4066Sahrens void
1255fa9e4066Sahrens byteswap_uint8_array(void *vbuf, size_t size)
1256fa9e4066Sahrens {
1257fa9e4066Sahrens }
1258fa9e4066Sahrens 
1259fa9e4066Sahrens void
1260fa9e4066Sahrens dmu_init(void)
1261fa9e4066Sahrens {
1262fa9e4066Sahrens 	dbuf_init();
1263fa9e4066Sahrens 	dnode_init();
1264fa9e4066Sahrens 	arc_init();
1265fa94a07fSbrendan 	l2arc_init();
1266fa9e4066Sahrens }
1267fa9e4066Sahrens 
1268fa9e4066Sahrens void
1269fa9e4066Sahrens dmu_fini(void)
1270fa9e4066Sahrens {
1271fa9e4066Sahrens 	arc_fini();
1272fa9e4066Sahrens 	dnode_fini();
1273fa9e4066Sahrens 	dbuf_fini();
1274fa94a07fSbrendan 	l2arc_fini();
1275fa9e4066Sahrens }
1276