xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu.c (revision 52abb70e)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2018 DilOS
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

static xuio_stats_t xuio_stats = {
	{ "onloan_read_buf",	KSTAT_DATA_UINT64 },
	{ "onloan_write_buf",	KSTAT_DATA_UINT64 },
	{ "read_buf_copied",	KSTAT_DATA_UINT64 },
	{ "read_buf_nocopy",	KSTAT_DATA_UINT64 },
	{ "write_buf_copied",	KSTAT_DATA_UINT64 },
	{ "write_buf_nocopy",	KSTAT_DATA_UINT64 }
};

#define	XUIOSTAT_INCR(stat, val)	\
	atomic_add_64(&xuio_stats.stat.value.ui64, (val))
#define	XUIOSTAT_BUMP(stat)	XUIOSTAT_INCR(stat, 1)

/*
 * Enable/disable nopwrite feature.
 */
int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied blocks from frees in one TXG.
 * After this threshold is crossed, additional dirty blocks from frees
 * wait until the next TXG.
 * A value of zero will disable this throttle.
 */
uint32_t zfs_per_txg_dirty_frees_percent = 30;

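/*
 * Illustrative note (not part of the original source): with the default
 * value of 30 and, for example, zfs_dirty_data_max = 4 GiB,
 * dmu_free_long_range_impl() below computes a threshold of
 * 30 * 4 GiB / 100 = 1.2 GiB of dirtied frees per TXG, after which
 * further frees wait for the next TXG to open.
 */
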
/*
 * This can be used for testing, to ensure that certain actions happen
 * while in the middle of a remap (which might otherwise complete too
 * quickly).
 */
int zfs_object_remap_one_indirect_delay_ticks = 0;

/*
 * Limit the amount we can prefetch with one call to this amount.  This
 * helps to limit the amount of memory that can be used by prefetching.
 * Larger objects should be prefetched a bit at a time.
 */
uint64_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;

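/*
 * Illustrative note (not part of the original source): with
 * SPA_MAXBLOCKSIZE of 16 MiB on this codebase, the default cap works
 * out to 8 * 16 MiB = 128 MiB of data prefetched per dmu_prefetch()
 * call; larger ranges are silently clamped (see dmu_prefetch() below).
 */
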
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "unallocated"		},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "object directory"		},
	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "object array"		},
	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "packed nvlist"		},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "packed nvlist size"		},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj"			},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj header"		},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA space map header"	},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA space map"		},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "ZIL intent log"		},
	{ DMU_BSWAP_DNODE,  TRUE,  FALSE,  "DMU dnode"			},
	{ DMU_BSWAP_OBJSET, TRUE,  TRUE,   "DMU objset"			},
	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL directory"		},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL directory child map"	},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dataset snap map"	},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL props"			},
	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL dataset"		},
	{ DMU_BSWAP_ZNODE,  TRUE,  FALSE,  "ZFS znode"			},
	{ DMU_BSWAP_OLDACL, TRUE,  FALSE,  "ZFS V0 ACL"			},
	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "ZFS plain file"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS directory"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS master node"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS delete queue"		},
	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "zvol object"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "zvol prop"			},
	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "other uint8[]"		},
	{ DMU_BSWAP_UINT64, FALSE, FALSE,  "other uint64[]"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "other ZAP"			},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "persistent error log"	},
	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "SPA history"		},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "SPA history offsets"	},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "Pool properties"		},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL permissions"		},
	{ DMU_BSWAP_ACL,    TRUE,  FALSE,  "ZFS ACL"			},
	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "ZFS SYSACL"			},
	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "FUID table"			},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "FUID table size"		},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dataset next clones"	},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "scan work queue"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS user/group used"	},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "ZFS user/group quota"	},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "snapshot refcount tags"	},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "DDT ZAP algorithm"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "DDT statistics"		},
	{ DMU_BSWAP_UINT8,  TRUE,  FALSE,  "System attributes"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA master node"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA attr registration"	},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "SA attr layouts"		},
	{ DMU_BSWAP_ZAP,    TRUE,  FALSE,  "scan translations"		},
	{ DMU_BSWAP_UINT8,  FALSE, FALSE,  "deduplicated block"		},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL deadlist map"		},
	{ DMU_BSWAP_UINT64, TRUE,  TRUE,   "DSL deadlist map hdr"	},
	{ DMU_BSWAP_ZAP,    TRUE,  TRUE,   "DSL dir clones"		},
	{ DMU_BSWAP_UINT64, TRUE,  FALSE,  "bpobj subobj"		}
};

const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{	byteswap_uint8_array,	"uint8"		},
	{	byteswap_uint16_array,	"uint16"	},
	{	byteswap_uint32_array,	"uint32"	},
	{	byteswap_uint64_array,	"uint64"	},
	{	zap_byteswap,		"zap"		},
	{	dnode_buf_byteswap,	"dnode"		},
	{	dmu_objset_byteswap,	"objset"	},
	{	zfs_znode_byteswap,	"znode"		},
	{	zfs_oldacl_byteswap,	"oldacl"	},
	{	zfs_acl_byteswap,	"acl"		}
};

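/*
 * Illustrative sketch (not part of the original source): how the two
 * tables above combine to byteswap a buffer of a given object type.
 * The DMU_OT_BYTESWAP() macro from sys/dmu.h maps an object type to its
 * dmu_object_byteswap_t index; the helper function itself is
 * hypothetical and shown only for explanation.
 */
static void
example_byteswap(dmu_object_type_t ot, void *buf, uint64_t size)
{
	dmu_object_byteswap_t bswap = DMU_OT_BYTESWAP(ot);

	ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
	/* dispatch to the per-class swap routine, e.g. zap_byteswap */
	dmu_ot_byteswap[bswap].ob_func(buf, size);
}
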
int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, 0, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

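/*
 * Illustrative sketch (not part of the original source): the typical
 * caller pattern for dmu_buf_hold() -- hold and read the buffer
 * covering an offset, copy a value out, and drop the hold.  The helper
 * name is hypothetical; the API calls are the ones defined above.
 */
static int
example_read_u64(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t *valp)
{
	dmu_buf_t *db;
	int err;

	err = dmu_buf_hold(os, object, offset, FTAG, &db, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);
	/* db_data is valid for db_size bytes while the hold is kept */
	*valp = *(uint64_t *)((char *)db->db_data + (offset - db->db_offset));
	dmu_buf_rele(db, FTAG);
	return (0);
}
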
int
dmu_bonus_max(void)
{
	return (DN_OLD_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else if (newsize < 0 || newsize > db_fake->db_size) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (!DMU_OT_IS_VALID(type)) {
		error = SET_ERROR(EINVAL);
	} else if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	type = dn->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error != 0)
		return (error);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}

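/*
 * Illustrative sketch (not part of the original source): copying an
 * object's bonus buffer out for inspection.  Callers normally know the
 * bonus type (e.g. a znode_phys_t for ZPL files) and interpret
 * db_data accordingly; the helper name is hypothetical.
 */
static int
example_copy_bonus(objset_t *os, uint64_t object, void *buf, uint64_t len)
{
	dmu_buf_t *db;
	int err;

	err = dmu_bonus_hold(os, object, FTAG, &db);
	if (err != 0)
		return (err);
	/* db->db_data holds db->db_size bytes of bonus data */
	bcopy(db->db_data, buf, MIN(len, db->db_size));
	dmu_buf_rele(db, FTAG);
	return (0);
}
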
/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	ASSERT(db != NULL);
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else
		dbuf_rele(db, tag);
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio;

	ASSERT(length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read.  dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/* initiate async i/o */
		if (read)
			(void) dbuf_read(db, zio, dbuf_flags);
		dbp[i] = &db->db;
	}

	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
	    DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
		dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
		    read && DNODE_IS_CACHEABLE(dn));
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	err = zio_wait(zio);
	if (err) {
		dmu_buf_rele_array(dbp, nblks, tag);
		return (err);
	}

	/* wait for other io to complete */
	if (read) {
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

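/*
 * Illustrative sketch (not part of the original source): the
 * hold-array/release-array pattern used by dmu_read() and dmu_write()
 * below -- hold every buffer covering a range, work on each one, then
 * release them all together.  The helper name and its byte-counting
 * body are hypothetical.
 */
static int
example_hold_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, uint64_t *totalp)
{
	dmu_buf_t **dbp;
	int numbufs, err;
	uint64_t total = 0;

	err = dmu_buf_hold_array(os, object, offset, length,
	    TRUE, FTAG, &numbufs, &dbp);
	if (err != 0)
		return (err);
	for (int i = 0; i < numbufs; i++) {
		/* dbp[i]->db_data is stable while the hold is kept */
		total += dbp[i]->db_size;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	*totalp = total;
	return (0);
}
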
/*
 * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, err;

	if (len == 0) {  /* they're interested in the bonus buffer */
		dn = DMU_META_DNODE(os);

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, level,
		    object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, level, blkid, pri, 0);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * See comment before the definition of dmu_prefetch_max.
	 */
	len = MIN(len, dmu_prefetch_max);

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	/*
	 * offset + len - 1 is the last byte we want to prefetch for, and offset
	 * is the first.  Then dbuf_whichblock(dn, level, offset + len - 1) is
	 * the last block we want to prefetch, and dbuf_whichblock(dn, level,
	 * offset) is the first.  Then the number we need to prefetch is the
	 * last - first + 1.
	 */
	if (level > 0 || dn->dn_datablkshift != 0) {
		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
		    dbuf_whichblock(dn, level, offset) + 1;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, level, offset);
		for (int i = 0; i < nblks; i++)
			dbuf_prefetch(dn, level, blkid + i, pri, 0);
	}

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}

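/*
 * Illustrative sketch (not part of the original source): warming the
 * cache ahead of a sequential read.  Level 0 prefetches the data
 * blocks themselves; the helper name is hypothetical, and
 * ZIO_PRIORITY_ASYNC_READ is the priority comparable callers use.
 */
static void
example_prefetch_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length)
{
	/* data (level-0) blocks covering [offset, offset + length) */
	dmu_prefetch(os, object, 0, offset, length, ZIO_PRIORITY_ASYNC_READ);
}
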
/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange =
	    dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	if (*start - minimum <= iblkrange * maxblks) {
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset.  We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;
		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN(*start, iblkrange);
	}
	if (*start < minimum)
		*start = minimum;
	return (0);
}

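/*
 * Illustrative note (not part of the original source): with a 128 KiB
 * data block size and dn_indblkshift of 17 (128 KiB indirect blocks),
 * an L1 indirect holds 2^(17 - SPA_BLKPTRSHIFT) = 2^(17 - 7) = 1024
 * 128-byte block pointers, so iblkrange = 128 KiB * 1024 = 128 MiB of
 * file data is covered by each L1 indirect searched above.
 */
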
/*
 * If this objset is of type DMU_OST_ZFS, return true if the vfs's
 * unmounted flag is set; otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when
 * unmounting.
 */
/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 4;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t long_free_dirty_all_txgs = 0;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		mutex_enter(&dp->dp_lock);
		for (int t = 0; t < TXG_SIZE; t++) {
			long_free_dirty_all_txgs +=
			    dp->dp_long_free_dirty_pertxg[t];
		}
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty_all_txgs >= dirty_frees_threshold) {
			txg_wait_open(dp, 0);
			continue;
		}

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
		    chunk_len;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
		    uint64_t, dmu_tx_get_txg(tx));
		dnode_free_range(dn, chunk_begin, chunk_len, tx);
		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}

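/*
 * Illustrative sketch (not part of the original source): truncating a
 * file by freeing everything past a new size, letting
 * dmu_free_long_range() above do the chunked, throttled frees.  The
 * helper name is hypothetical.
 */
static int
example_truncate(objset_t *os, uint64_t object, uint64_t newsize)
{
	/* free [newsize, end-of-object) in chunked transactions */
	return (dmu_free_long_range(os, object, newsize, DMU_OBJECT_END));
}
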
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			int tocpy;
			int bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}

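/*
 * Illustrative sketch (not part of the original source): reading a
 * range of an object into a kernel buffer.  No transaction is needed
 * for reads; DMU_READ_PREFETCH lets dmu_zfetch see the access.  The
 * helper name is hypothetical.
 */
static int
example_read(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, void *buf)
{
	return (dmu_read(os, object, offset, size, buf, DMU_READ_PREFETCH));
}
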
static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

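/*
 * Illustrative sketch (not part of the original source): the standard
 * transactional pattern around dmu_write() -- create a tx, declare the
 * write, assign it to a txg, write, and commit.  The helper name is
 * hypothetical; the tx calls are the usual DMU transaction API.
 */
static int
example_write(objset_t *os, uint64_t object, uint64_t offset,
    int size, const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, offset, size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, object, offset, size, buf, tx);
	dmu_tx_commit(tx);
	return (0);
}
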
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

static int
dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
    uint64_t last_removal_txg, uint64_t offset)
{
	uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
	int err = 0;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
	ASSERT3P(dbuf, !=, NULL);

	/*
	 * If the block hasn't been written yet, this default will ensure
	 * we don't try to remap it.
	 */
	uint64_t birth = UINT64_MAX;
	ASSERT3U(last_removal_txg, !=, UINT64_MAX);
	if (dbuf->db_blkptr != NULL)
		birth = dbuf->db_blkptr->blk_birth;
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * If this L1 was already written after the last removal, then we've
	 * already tried to remap it.
	 */
	if (birth <= last_removal_txg &&
	    dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
	    dbuf_can_remap(dbuf)) {
		dmu_tx_t *tx = dmu_tx_create(os);
		dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err == 0) {
			(void) dbuf_dirty(dbuf, tx);
			dmu_tx_commit(tx);
		} else {
			dmu_tx_abort(tx);
		}
	}

	dbuf_rele(dbuf, FTAG);

	delay(zfs_object_remap_one_indirect_delay_ticks);

	return (err);
}

10985cabbc6bSPrashanth Sreenivasa /*
10995cabbc6bSPrashanth Sreenivasa  * Remap all blockpointers in the object, if possible, so that they reference
11005cabbc6bSPrashanth Sreenivasa  * only concrete vdevs.
11015cabbc6bSPrashanth Sreenivasa  *
11025cabbc6bSPrashanth Sreenivasa  * To do this, iterate over the L0 blockpointers and remap any that reference
11035cabbc6bSPrashanth Sreenivasa  * an indirect vdev. Note that we only examine L0 blockpointers; since we
11045cabbc6bSPrashanth Sreenivasa  * cannot guarantee that we can remap all blockpointer anyways (due to split
11055cabbc6bSPrashanth Sreenivasa  * blocks), we do not want to make the code unnecessarily complicated to
11065cabbc6bSPrashanth Sreenivasa  * catch the unlikely case that there is an L1 block on an indirect vdev that
11075cabbc6bSPrashanth Sreenivasa  * contains no indirect blockpointers.
11085cabbc6bSPrashanth Sreenivasa  */
11095cabbc6bSPrashanth Sreenivasa int
11105cabbc6bSPrashanth Sreenivasa dmu_object_remap_indirects(objset_t *os, uint64_t object,
11115cabbc6bSPrashanth Sreenivasa     uint64_t last_removal_txg)
11125cabbc6bSPrashanth Sreenivasa {
11135cabbc6bSPrashanth Sreenivasa 	uint64_t offset, l1span;
11145cabbc6bSPrashanth Sreenivasa 	int err;
11155cabbc6bSPrashanth Sreenivasa 	dnode_t *dn;
11165cabbc6bSPrashanth Sreenivasa 
11175cabbc6bSPrashanth Sreenivasa 	err = dnode_hold(os, object, FTAG, &dn);
11185cabbc6bSPrashanth Sreenivasa 	if (err != 0) {
11195cabbc6bSPrashanth Sreenivasa 		return (err);
11205cabbc6bSPrashanth Sreenivasa 	}
11215cabbc6bSPrashanth Sreenivasa 
11225cabbc6bSPrashanth Sreenivasa 	if (dn->dn_nlevels <= 1) {
11235cabbc6bSPrashanth Sreenivasa 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
11245cabbc6bSPrashanth Sreenivasa 			err = SET_ERROR(EINTR);
11255cabbc6bSPrashanth Sreenivasa 		}
11265cabbc6bSPrashanth Sreenivasa 
11275cabbc6bSPrashanth Sreenivasa 		/*
11285cabbc6bSPrashanth Sreenivasa 		 * If the dnode has no indirect blocks, we cannot dirty them.
11295cabbc6bSPrashanth Sreenivasa 		 * We still want to remap the blkptr(s) in the dnode if
11305cabbc6bSPrashanth Sreenivasa 		 * appropriate, so mark it as dirty.
11315cabbc6bSPrashanth Sreenivasa 		 */
11325cabbc6bSPrashanth Sreenivasa 		if (err == 0 && dnode_needs_remap(dn)) {
11335cabbc6bSPrashanth Sreenivasa 			dmu_tx_t *tx = dmu_tx_create(os);
11345cabbc6bSPrashanth Sreenivasa 			dmu_tx_hold_bonus(tx, dn->dn_object);
11355cabbc6bSPrashanth Sreenivasa 			if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) {
11365cabbc6bSPrashanth Sreenivasa 				dnode_setdirty(dn, tx);
11375cabbc6bSPrashanth Sreenivasa 				dmu_tx_commit(tx);
11385cabbc6bSPrashanth Sreenivasa 			} else {
11395cabbc6bSPrashanth Sreenivasa 				dmu_tx_abort(tx);
11405cabbc6bSPrashanth Sreenivasa 			}
11415cabbc6bSPrashanth Sreenivasa 		}
11425cabbc6bSPrashanth Sreenivasa 
11435cabbc6bSPrashanth Sreenivasa 		dnode_rele(dn, FTAG);
11445cabbc6bSPrashanth Sreenivasa 		return (err);
11455cabbc6bSPrashanth Sreenivasa 	}
11465cabbc6bSPrashanth Sreenivasa 
11475cabbc6bSPrashanth Sreenivasa 	offset = 0;
11485cabbc6bSPrashanth Sreenivasa 	l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
11495cabbc6bSPrashanth Sreenivasa 	    dn->dn_datablkshift);
11505cabbc6bSPrashanth Sreenivasa 	/*
11515cabbc6bSPrashanth Sreenivasa 	 * Find the next L1 indirect that is not a hole.
11525cabbc6bSPrashanth Sreenivasa 	 */
11535cabbc6bSPrashanth Sreenivasa 	while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
11545cabbc6bSPrashanth Sreenivasa 		if (issig(JUSTLOOKING) && issig(FORREAL)) {
11555cabbc6bSPrashanth Sreenivasa 			err = SET_ERROR(EINTR);
11565cabbc6bSPrashanth Sreenivasa 			break;
11575cabbc6bSPrashanth Sreenivasa 		}
11585cabbc6bSPrashanth Sreenivasa 		if ((err = dmu_object_remap_one_indirect(os, dn,
11595cabbc6bSPrashanth Sreenivasa 		    last_removal_txg, offset)) != 0) {
11605cabbc6bSPrashanth Sreenivasa 			break;
11615cabbc6bSPrashanth Sreenivasa 		}
11625cabbc6bSPrashanth Sreenivasa 		offset += l1span;
11635cabbc6bSPrashanth Sreenivasa 	}
11645cabbc6bSPrashanth Sreenivasa 
11655cabbc6bSPrashanth Sreenivasa 	dnode_rele(dn, FTAG);
11665cabbc6bSPrashanth Sreenivasa 	return (err);
11675cabbc6bSPrashanth Sreenivasa }
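
/*
 * Illustrative caller sketch (not part of this file): walk every object in
 * an objset after a device removal and remap its indirects.  The use of
 * dmu_object_next() and the ESRCH termination are assumptions of typical
 * DMU usage for the example, not something dmu.c itself mandates.
 *
 *	uint64_t obj = 0;
 *	int err;
 *
 *	while ((err = dmu_object_next(os, &obj, B_FALSE, 0)) == 0) {
 *		err = dmu_object_remap_indirects(os, obj, last_removal_txg);
 *		if (err != 0)
 *			break;
 *	}
 *	if (err == ESRCH)
 *		err = 0;
 */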
11685cabbc6bSPrashanth Sreenivasa 
116982c9918fSTim Haley void
117082c9918fSTim Haley dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
117182c9918fSTim Haley     dmu_tx_t *tx)
117282c9918fSTim Haley {
117382c9918fSTim Haley 	dmu_buf_t **dbp;
117482c9918fSTim Haley 	int numbufs, i;
117582c9918fSTim Haley 
117682c9918fSTim Haley 	if (size == 0)
117782c9918fSTim Haley 		return;
117882c9918fSTim Haley 
117982c9918fSTim Haley 	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
118082c9918fSTim Haley 	    FALSE, FTAG, &numbufs, &dbp));
118182c9918fSTim Haley 
118282c9918fSTim Haley 	for (i = 0; i < numbufs; i++) {
118382c9918fSTim Haley 		dmu_buf_t *db = dbp[i];
118482c9918fSTim Haley 
118582c9918fSTim Haley 		dmu_buf_will_not_fill(db, tx);
118682c9918fSTim Haley 	}
118782c9918fSTim Haley 	dmu_buf_rele_array(dbp, numbufs, FTAG);
118882c9918fSTim Haley }
118982c9918fSTim Haley 
11905d7b4d43SMatthew Ahrens void
11915d7b4d43SMatthew Ahrens dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
11925d7b4d43SMatthew Ahrens     void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
11935d7b4d43SMatthew Ahrens     int compressed_size, int byteorder, dmu_tx_t *tx)
11945d7b4d43SMatthew Ahrens {
11955d7b4d43SMatthew Ahrens 	dmu_buf_t *db;
11965d7b4d43SMatthew Ahrens 
11975d7b4d43SMatthew Ahrens 	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
11985d7b4d43SMatthew Ahrens 	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
11995d7b4d43SMatthew Ahrens 	VERIFY0(dmu_buf_hold_noread(os, object, offset,
12005d7b4d43SMatthew Ahrens 	    FTAG, &db));
12015d7b4d43SMatthew Ahrens 
12025d7b4d43SMatthew Ahrens 	dmu_buf_write_embedded(db,
12035d7b4d43SMatthew Ahrens 	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
12045d7b4d43SMatthew Ahrens 	    uncompressed_size, compressed_size, byteorder, tx);
12055d7b4d43SMatthew Ahrens 
12065d7b4d43SMatthew Ahrens 	dmu_buf_rele(db, FTAG);
12075d7b4d43SMatthew Ahrens }
12085d7b4d43SMatthew Ahrens 
1209c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /*
1210c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  * DMU support for xuio
1211c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  */
1212c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_t *xuio_ksp = NULL;
1213c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1214c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int
1215c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_init(xuio_t *xuio, int nblk)
1216c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1217c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv;
1218c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio_t *uio = &xuio->xu_uio;
1219c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1220c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio->uio_iovcnt = nblk;
1221c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
1222c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1223c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
1224c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->cnt = nblk;
1225c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
1226c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->iovp = uio->uio_iov;
1227c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	XUIO_XUZC_PRIV(xuio) = priv;
1228c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1229c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (XUIO_XUZC_RW(xuio) == UIO_READ)
1230c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
1231c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	else
1232c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);
1233c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1234c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (0);
1235c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1236c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1237c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
1238c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_fini(xuio_t *xuio)
1239c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1240c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1241c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	int nblk = priv->cnt;
1242c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1243c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	kmem_free(priv->iovp, nblk * sizeof (iovec_t));
1244c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
1245c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	kmem_free(priv, sizeof (dmu_xuio_t));
1246c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1247c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (XUIO_XUZC_RW(xuio) == UIO_READ)
1248c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
1249c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	else
1250c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
1251c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1252c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1253c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /*
1254c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
1255c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  * and increase priv->next by 1.
1256c242f9a0Schunli zhang - Sun Microsystems - Irvine United States  */
1257c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int
1258c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
1259c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1260c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	struct iovec *iov;
1261c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	uio_t *uio = &xuio->xu_uio;
1262c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1263c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	int i = priv->next++;
1264c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1265c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	ASSERT(i < priv->cnt);
12665602294fSDan Kimmel 	ASSERT(off + n <= arc_buf_lsize(abuf));
1267c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	iov = uio->uio_iov + i;
1268c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	iov->iov_base = (char *)abuf->b_data + off;
1269c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	iov->iov_len = n;
1270c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->bufs[i] = abuf;
1271c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (0);
1272c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1273c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1274c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int
1275c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_cnt(xuio_t *xuio)
1276c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1277c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1278c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (priv->cnt);
1279c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1280c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1281c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *
1282c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_arcbuf(xuio_t *xuio, int i)
1283c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1284c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1285c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1286c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	ASSERT(i < priv->cnt);
1287c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	return (priv->bufs[i]);
1288c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1289c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1290c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
1291c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_clear(xuio_t *xuio, int i)
1292c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1293c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
1294c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1295c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	ASSERT(i < priv->cnt);
1296c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	priv->bufs[i] = NULL;
1297c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1298c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1299c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void
1300c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_init(void)
1301c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1302c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
1303c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	    KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
1304c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	    KSTAT_FLAG_VIRTUAL);
1305c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (xuio_ksp != NULL) {
1306c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		xuio_ksp->ks_data = &xuio_stats;
1307c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		kstat_install(xuio_ksp);
1308c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	}
1309c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1310c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1311c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void
1312c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(void)
1313c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1314c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (xuio_ksp != NULL) {
1315c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		kstat_delete(xuio_ksp);
1316c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		xuio_ksp = NULL;
1317c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	}
1318c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1319c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1320c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
132199aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_copied(void)
1322c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1323c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	XUIOSTAT_BUMP(xuiostat_wbuf_copied);
1324c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1325c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1326c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void
132799aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_nocopy(void)
1328c242f9a0Schunli zhang - Sun Microsystems - Irvine United States {
1329c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
1330c242f9a0Schunli zhang - Sun Microsystems - Irvine United States }
1331c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1332fa9e4066Sahrens #ifdef _KERNEL
13338dfe5547SRichard Yao int
1334f8554bb9SMatthew Ahrens dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
1335feb08c6bSbillm {
1336feb08c6bSbillm 	dmu_buf_t **dbp;
1337feb08c6bSbillm 	int numbufs, i, err;
1338c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	xuio_t *xuio = NULL;
1339feb08c6bSbillm 
1340feb08c6bSbillm 	/*
1341feb08c6bSbillm 	 * NB: we could do this block-at-a-time, but it's nice
1342feb08c6bSbillm 	 * to be reading in parallel.
1343feb08c6bSbillm 	 */
1344f8554bb9SMatthew Ahrens 	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
1345f8554bb9SMatthew Ahrens 	    TRUE, FTAG, &numbufs, &dbp, 0);
1346feb08c6bSbillm 	if (err)
1347feb08c6bSbillm 		return (err);
1348feb08c6bSbillm 
1349c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	if (uio->uio_extflg == UIO_XUIO)
1350c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		xuio = (xuio_t *)uio;
1351c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1352feb08c6bSbillm 	for (i = 0; i < numbufs; i++) {
1353feb08c6bSbillm 		int tocpy;
1354feb08c6bSbillm 		int bufoff;
1355feb08c6bSbillm 		dmu_buf_t *db = dbp[i];
1356feb08c6bSbillm 
1357feb08c6bSbillm 		ASSERT(size > 0);
1358feb08c6bSbillm 
1359feb08c6bSbillm 		bufoff = uio->uio_loffset - db->db_offset;
1360feb08c6bSbillm 		tocpy = (int)MIN(db->db_size - bufoff, size);
1361feb08c6bSbillm 
1362c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		if (xuio) {
1363c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
1364c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			arc_buf_t *dbuf_abuf = dbi->db_buf;
1365c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
1366c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
1367c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			if (!err) {
1368c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				uio->uio_resid -= tocpy;
1369c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				uio->uio_loffset += tocpy;
1370c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			}
1371c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 
1372c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			if (abuf == dbuf_abuf)
1373c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
1374c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			else
1375c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 				XUIOSTAT_BUMP(xuiostat_rbuf_copied);
1376c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		} else {
1377c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			err = uiomove((char *)db->db_data + bufoff, tocpy,
1378c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 			    UIO_READ, uio);
1379c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		}
1380feb08c6bSbillm 		if (err)
1381feb08c6bSbillm 			break;
1382feb08c6bSbillm 
1383feb08c6bSbillm 		size -= tocpy;
1384feb08c6bSbillm 	}
1385feb08c6bSbillm 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1386feb08c6bSbillm 
1387feb08c6bSbillm 	return (err);
1388feb08c6bSbillm }
1389feb08c6bSbillm 
1390f8554bb9SMatthew Ahrens /*
1391f8554bb9SMatthew Ahrens  * Read 'size' bytes into the uio buffer.
1392f8554bb9SMatthew Ahrens  * From object zdb->db_object.
1393f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1394f8554bb9SMatthew Ahrens  *
1395f8554bb9SMatthew Ahrens  * If the caller already has a dbuf in the target object
1396f8554bb9SMatthew Ahrens  * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
1397f8554bb9SMatthew Ahrens  * because we don't have to find the dnode_t for the object.
1398f8554bb9SMatthew Ahrens  */
1399f8554bb9SMatthew Ahrens int
1400f8554bb9SMatthew Ahrens dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
1401f8554bb9SMatthew Ahrens {
1402f8554bb9SMatthew Ahrens 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1403f8554bb9SMatthew Ahrens 	dnode_t *dn;
1404f8554bb9SMatthew Ahrens 	int err;
1405f8554bb9SMatthew Ahrens 
1406f8554bb9SMatthew Ahrens 	if (size == 0)
1407f8554bb9SMatthew Ahrens 		return (0);
1408f8554bb9SMatthew Ahrens 
1409f8554bb9SMatthew Ahrens 	DB_DNODE_ENTER(db);
1410f8554bb9SMatthew Ahrens 	dn = DB_DNODE(db);
1411f8554bb9SMatthew Ahrens 	err = dmu_read_uio_dnode(dn, uio, size);
1412f8554bb9SMatthew Ahrens 	DB_DNODE_EXIT(db);
1413f8554bb9SMatthew Ahrens 
1414f8554bb9SMatthew Ahrens 	return (err);
1415f8554bb9SMatthew Ahrens }
1416f8554bb9SMatthew Ahrens 
1417f8554bb9SMatthew Ahrens /*
1418f8554bb9SMatthew Ahrens  * Read 'size' bytes into the uio buffer.
1419f8554bb9SMatthew Ahrens  * From the specified object
1420f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1421f8554bb9SMatthew Ahrens  */
1422f8554bb9SMatthew Ahrens int
1423f8554bb9SMatthew Ahrens dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
1424f8554bb9SMatthew Ahrens {
1425f8554bb9SMatthew Ahrens 	dnode_t *dn;
1426f8554bb9SMatthew Ahrens 	int err;
1427f8554bb9SMatthew Ahrens 
1428f8554bb9SMatthew Ahrens 	if (size == 0)
1429f8554bb9SMatthew Ahrens 		return (0);
1430f8554bb9SMatthew Ahrens 
1431f8554bb9SMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
1432f8554bb9SMatthew Ahrens 	if (err)
1433f8554bb9SMatthew Ahrens 		return (err);
1434f8554bb9SMatthew Ahrens 
1435f8554bb9SMatthew Ahrens 	err = dmu_read_uio_dnode(dn, uio, size);
1436f8554bb9SMatthew Ahrens 
1437f8554bb9SMatthew Ahrens 	dnode_rele(dn, FTAG);
1438f8554bb9SMatthew Ahrens 
1439f8554bb9SMatthew Ahrens 	return (err);
1440f8554bb9SMatthew Ahrens }
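
/*
 * Illustrative sketch of a kernel caller reading into a preallocated buffer
 * with dmu_read_uio().  The uio/iovec setup below reflects typical illumos
 * usage and is an assumption for the example, not something defined here.
 *
 *	iovec_t iov = { .iov_base = buf, .iov_len = len };
 *	uio_t uio = { 0 };
 *
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_loffset = off;
 *	uio.uio_resid = len;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *
 *	err = dmu_read_uio(os, object, &uio, len);
 */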
1441f8554bb9SMatthew Ahrens 
14428dfe5547SRichard Yao int
144394d1a210STim Haley dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
1444fa9e4066Sahrens {
1445fa9e4066Sahrens 	dmu_buf_t **dbp;
144694d1a210STim Haley 	int numbufs;
1447fa9e4066Sahrens 	int err = 0;
144894d1a210STim Haley 	int i;
1449fa9e4066Sahrens 
145094d1a210STim Haley 	err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
145194d1a210STim Haley 	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1452ea8dc4b6Seschrock 	if (err)
1453ea8dc4b6Seschrock 		return (err);
1454fa9e4066Sahrens 
1455fa9e4066Sahrens 	for (i = 0; i < numbufs; i++) {
1456fa9e4066Sahrens 		int tocpy;
1457fa9e4066Sahrens 		int bufoff;
1458fa9e4066Sahrens 		dmu_buf_t *db = dbp[i];
1459fa9e4066Sahrens 
1460fa9e4066Sahrens 		ASSERT(size > 0);
1461fa9e4066Sahrens 
1462feb08c6bSbillm 		bufoff = uio->uio_loffset - db->db_offset;
1463fa9e4066Sahrens 		tocpy = (int)MIN(db->db_size - bufoff, size);
1464fa9e4066Sahrens 
1465fa9e4066Sahrens 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1466fa9e4066Sahrens 
1467fa9e4066Sahrens 		if (tocpy == db->db_size)
1468fa9e4066Sahrens 			dmu_buf_will_fill(db, tx);
1469fa9e4066Sahrens 		else
1470fa9e4066Sahrens 			dmu_buf_will_dirty(db, tx);
1471fa9e4066Sahrens 
1472fa9e4066Sahrens 		/*
1473fa9e4066Sahrens 		 * XXX uiomove could block forever (eg. nfs-backed
1474fa9e4066Sahrens 		 * pages).  There needs to be a uiolockdown() function
1475fa9e4066Sahrens 		 * to lock the pages in memory, so that uiomove won't
1476fa9e4066Sahrens 		 * block.
1477fa9e4066Sahrens 		 */
1478fa9e4066Sahrens 		err = uiomove((char *)db->db_data + bufoff, tocpy,
1479fa9e4066Sahrens 		    UIO_WRITE, uio);
1480fa9e4066Sahrens 
1481fa9e4066Sahrens 		if (tocpy == db->db_size)
1482fa9e4066Sahrens 			dmu_buf_fill_done(db, tx);
1483fa9e4066Sahrens 
1484fa9e4066Sahrens 		if (err)
1485fa9e4066Sahrens 			break;
1486fa9e4066Sahrens 
1487fa9e4066Sahrens 		size -= tocpy;
1488fa9e4066Sahrens 	}
148994d1a210STim Haley 
1490ea8dc4b6Seschrock 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1491fa9e4066Sahrens 	return (err);
1492fa9e4066Sahrens }
149344eda4d7Smaybee 
1494f8554bb9SMatthew Ahrens /*
1495f8554bb9SMatthew Ahrens  * Write 'size' bytes from the uio buffer.
1496f8554bb9SMatthew Ahrens  * To object zdb->db_object.
1497f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1498f8554bb9SMatthew Ahrens  *
1499f8554bb9SMatthew Ahrens  * If the caller already has a dbuf in the target object
1500f8554bb9SMatthew Ahrens  * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
1501f8554bb9SMatthew Ahrens  * because we don't have to find the dnode_t for the object.
1502f8554bb9SMatthew Ahrens  */
150394d1a210STim Haley int
150494d1a210STim Haley dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
150594d1a210STim Haley     dmu_tx_t *tx)
150694d1a210STim Haley {
1507744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1508744947dcSTom Erickson 	dnode_t *dn;
1509744947dcSTom Erickson 	int err;
1510744947dcSTom Erickson 
151194d1a210STim Haley 	if (size == 0)
151294d1a210STim Haley 		return (0);
151394d1a210STim Haley 
1514744947dcSTom Erickson 	DB_DNODE_ENTER(db);
1515744947dcSTom Erickson 	dn = DB_DNODE(db);
1516744947dcSTom Erickson 	err = dmu_write_uio_dnode(dn, uio, size, tx);
1517744947dcSTom Erickson 	DB_DNODE_EXIT(db);
1518744947dcSTom Erickson 
1519744947dcSTom Erickson 	return (err);
152094d1a210STim Haley }
152194d1a210STim Haley 
1522f8554bb9SMatthew Ahrens /*
1523f8554bb9SMatthew Ahrens  * Write 'size' bytes from the uio buffer.
1524f8554bb9SMatthew Ahrens  * To the specified object.
1525f8554bb9SMatthew Ahrens  * Starting at offset uio->uio_loffset.
1526f8554bb9SMatthew Ahrens  */
152794d1a210STim Haley int
152894d1a210STim Haley dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
152994d1a210STim Haley     dmu_tx_t *tx)
153094d1a210STim Haley {
153194d1a210STim Haley 	dnode_t *dn;
153294d1a210STim Haley 	int err;
153394d1a210STim Haley 
153494d1a210STim Haley 	if (size == 0)
153594d1a210STim Haley 		return (0);
153694d1a210STim Haley 
153794d1a210STim Haley 	err = dnode_hold(os, object, FTAG, &dn);
153894d1a210STim Haley 	if (err)
153994d1a210STim Haley 		return (err);
154094d1a210STim Haley 
154194d1a210STim Haley 	err = dmu_write_uio_dnode(dn, uio, size, tx);
154294d1a210STim Haley 
154394d1a210STim Haley 	dnode_rele(dn, FTAG);
154494d1a210STim Haley 
154594d1a210STim Haley 	return (err);
154694d1a210STim Haley }
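
/*
 * Illustrative sketch of the corresponding write path: dirty the range in a
 * transaction, then copy from the uio.  dmu_tx_hold_write() is assumed from
 * the DMU transaction interface; 'uio' is assumed to be initialized as in
 * the read sketch above.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *
 *	dmu_tx_hold_write(tx, object, uio.uio_loffset, len);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		err = dmu_write_uio(os, object, &uio, len, tx);
 *		dmu_tx_commit(tx);
 *	}
 */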
154794d1a210STim Haley 
154844eda4d7Smaybee int
154944eda4d7Smaybee dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
155044eda4d7Smaybee     page_t *pp, dmu_tx_t *tx)
155144eda4d7Smaybee {
155244eda4d7Smaybee 	dmu_buf_t **dbp;
155344eda4d7Smaybee 	int numbufs, i;
155444eda4d7Smaybee 	int err;
155544eda4d7Smaybee 
155644eda4d7Smaybee 	if (size == 0)
155744eda4d7Smaybee 		return (0);
155844eda4d7Smaybee 
155944eda4d7Smaybee 	err = dmu_buf_hold_array(os, object, offset, size,
156044eda4d7Smaybee 	    FALSE, FTAG, &numbufs, &dbp);
156144eda4d7Smaybee 	if (err)
156244eda4d7Smaybee 		return (err);
156344eda4d7Smaybee 
156444eda4d7Smaybee 	for (i = 0; i < numbufs; i++) {
156544eda4d7Smaybee 		int tocpy, copied, thiscpy;
156644eda4d7Smaybee 		int bufoff;
156744eda4d7Smaybee 		dmu_buf_t *db = dbp[i];
156844eda4d7Smaybee 		caddr_t va;
156944eda4d7Smaybee 
157044eda4d7Smaybee 		ASSERT(size > 0);
157144eda4d7Smaybee 		ASSERT3U(db->db_size, >=, PAGESIZE);
157244eda4d7Smaybee 
157344eda4d7Smaybee 		bufoff = offset - db->db_offset;
157444eda4d7Smaybee 		tocpy = (int)MIN(db->db_size - bufoff, size);
157544eda4d7Smaybee 
157644eda4d7Smaybee 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
157744eda4d7Smaybee 
157844eda4d7Smaybee 		if (tocpy == db->db_size)
157944eda4d7Smaybee 			dmu_buf_will_fill(db, tx);
158044eda4d7Smaybee 		else
158144eda4d7Smaybee 			dmu_buf_will_dirty(db, tx);
158244eda4d7Smaybee 
158344eda4d7Smaybee 		for (copied = 0; copied < tocpy; copied += PAGESIZE) {
158444eda4d7Smaybee 			ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
158544eda4d7Smaybee 			thiscpy = MIN(PAGESIZE, tocpy - copied);
15860fab61baSJonathan W Adams 			va = zfs_map_page(pp, S_READ);
158744eda4d7Smaybee 			bcopy(va, (char *)db->db_data + bufoff, thiscpy);
15880fab61baSJonathan W Adams 			zfs_unmap_page(pp, va);
158944eda4d7Smaybee 			pp = pp->p_next;
159044eda4d7Smaybee 			bufoff += PAGESIZE;
159144eda4d7Smaybee 		}
159244eda4d7Smaybee 
159344eda4d7Smaybee 		if (tocpy == db->db_size)
159444eda4d7Smaybee 			dmu_buf_fill_done(db, tx);
159544eda4d7Smaybee 
159644eda4d7Smaybee 		offset += tocpy;
159744eda4d7Smaybee 		size -= tocpy;
159844eda4d7Smaybee 	}
159944eda4d7Smaybee 	dmu_buf_rele_array(dbp, numbufs, FTAG);
160044eda4d7Smaybee 	return (err);
160144eda4d7Smaybee }
1602fa9e4066Sahrens #endif
1603fa9e4066Sahrens 
16042fdbea25SAleksandr Guzovskiy /*
16052fdbea25SAleksandr Guzovskiy  * Allocate a loaned anonymous arc buffer.
16062fdbea25SAleksandr Guzovskiy  */
16072fdbea25SAleksandr Guzovskiy arc_buf_t *
16082fdbea25SAleksandr Guzovskiy dmu_request_arcbuf(dmu_buf_t *handle, int size)
16092fdbea25SAleksandr Guzovskiy {
1610744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
16112fdbea25SAleksandr Guzovskiy 
16125602294fSDan Kimmel 	return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
16132fdbea25SAleksandr Guzovskiy }
16142fdbea25SAleksandr Guzovskiy 
16152fdbea25SAleksandr Guzovskiy /*
16162fdbea25SAleksandr Guzovskiy  * Free a loaned arc buffer.
16172fdbea25SAleksandr Guzovskiy  */
16182fdbea25SAleksandr Guzovskiy void
16192fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(arc_buf_t *buf)
16202fdbea25SAleksandr Guzovskiy {
16212fdbea25SAleksandr Guzovskiy 	arc_return_buf(buf, FTAG);
1622dcbf3bd6SGeorge Wilson 	arc_buf_destroy(buf, FTAG);
16232fdbea25SAleksandr Guzovskiy }
16242fdbea25SAleksandr Guzovskiy 
16252fdbea25SAleksandr Guzovskiy /*
16262fdbea25SAleksandr Guzovskiy  * When possible, directly assign the passed loaned arc buffer to a dbuf.
16272fdbea25SAleksandr Guzovskiy  * If this is not possible, copy the contents of the passed arc buf via
16282fdbea25SAleksandr Guzovskiy  * dmu_write().
16292fdbea25SAleksandr Guzovskiy  */
16302fdbea25SAleksandr Guzovskiy void
16318dfe5547SRichard Yao dmu_assign_arcbuf_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
16322fdbea25SAleksandr Guzovskiy     dmu_tx_t *tx)
16332fdbea25SAleksandr Guzovskiy {
16342fdbea25SAleksandr Guzovskiy 	dmu_buf_impl_t *db;
16355602294fSDan Kimmel 	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
16362fdbea25SAleksandr Guzovskiy 	uint64_t blkid;
16372fdbea25SAleksandr Guzovskiy 
16382fdbea25SAleksandr Guzovskiy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1639a2cdcdd2SPaul Dagnelie 	blkid = dbuf_whichblock(dn, 0, offset);
16402fdbea25SAleksandr Guzovskiy 	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
16412fdbea25SAleksandr Guzovskiy 	rw_exit(&dn->dn_struct_rwlock);
16422fdbea25SAleksandr Guzovskiy 
16438a904709SMatthew Ahrens 	/*
16448a904709SMatthew Ahrens 	 * We can only assign if the offset is aligned, the arc buf is the
16455602294fSDan Kimmel 	 * same size as the dbuf, and the dbuf is not metadata.
16468a904709SMatthew Ahrens 	 */
16475602294fSDan Kimmel 	if (offset == db->db.db_offset && blksz == db->db.db_size) {
16482fdbea25SAleksandr Guzovskiy 		dbuf_assign_arcbuf(db, buf, tx);
16492fdbea25SAleksandr Guzovskiy 		dbuf_rele(db, FTAG);
16502fdbea25SAleksandr Guzovskiy 	} else {
1651744947dcSTom Erickson 		objset_t *os;
1652744947dcSTom Erickson 		uint64_t object;
1653744947dcSTom Erickson 
16545602294fSDan Kimmel 		/* compressed bufs must always be assignable to their dbuf */
16555602294fSDan Kimmel 		ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
16565602294fSDan Kimmel 		ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
16575602294fSDan Kimmel 
1658744947dcSTom Erickson 		os = dn->dn_objset;
1659744947dcSTom Erickson 		object = dn->dn_object;
1660744947dcSTom Erickson 
16612fdbea25SAleksandr Guzovskiy 		dbuf_rele(db, FTAG);
1662744947dcSTom Erickson 		dmu_write(os, object, offset, blksz, buf->b_data, tx);
16632fdbea25SAleksandr Guzovskiy 		dmu_return_arcbuf(buf);
1664c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
16652fdbea25SAleksandr Guzovskiy 	}
16662fdbea25SAleksandr Guzovskiy }
16672fdbea25SAleksandr Guzovskiy 
16688dfe5547SRichard Yao void
16698dfe5547SRichard Yao dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
16708dfe5547SRichard Yao     dmu_tx_t *tx)
16718dfe5547SRichard Yao {
16728dfe5547SRichard Yao 	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
16738dfe5547SRichard Yao 
16748dfe5547SRichard Yao 	DB_DNODE_ENTER(dbuf);
16758dfe5547SRichard Yao 	dmu_assign_arcbuf_dnode(DB_DNODE(dbuf), offset, buf, tx);
16768dfe5547SRichard Yao 	DB_DNODE_EXIT(dbuf);
16778dfe5547SRichard Yao }
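
/*
 * Illustrative sketch of the loaned-buffer write path defined above: borrow
 * an anonymous arc buf, fill it, and hand it back to the DMU under a
 * transaction.  dmu_tx_hold_write(), the source buffer 'src', and the
 * bonus-dbuf handle 'db' are assumptions for the example.
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(db, blksz);
 *
 *	bcopy(src, abuf->b_data, blksz);
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, blksz);
 *	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
 *		dmu_tx_abort(tx);
 *		dmu_return_arcbuf(abuf);
 *	} else {
 *		dmu_assign_arcbuf(db, offset, abuf, tx);
 *		dmu_tx_commit(tx);
 *	}
 */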
16788dfe5547SRichard Yao 
1679c5c6ffa0Smaybee typedef struct {
1680b24ab676SJeff Bonwick 	dbuf_dirty_record_t	*dsa_dr;
1681b24ab676SJeff Bonwick 	dmu_sync_cb_t		*dsa_done;
1682b24ab676SJeff Bonwick 	zgd_t			*dsa_zgd;
1683b24ab676SJeff Bonwick 	dmu_tx_t		*dsa_tx;
1684c717a561Smaybee } dmu_sync_arg_t;
1685c5c6ffa0Smaybee 
1686e14bb325SJeff Bonwick /* ARGSUSED */
1687e14bb325SJeff Bonwick static void
1688e14bb325SJeff Bonwick dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1689e14bb325SJeff Bonwick {
1690b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa = varg;
1691b24ab676SJeff Bonwick 	dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
1692e14bb325SJeff Bonwick 	blkptr_t *bp = zio->io_bp;
1693975c32a0SNeil Perrin 
1694b24ab676SJeff Bonwick 	if (zio->io_error == 0) {
1695b24ab676SJeff Bonwick 		if (BP_IS_HOLE(bp)) {
1696b24ab676SJeff Bonwick 			/*
1697b24ab676SJeff Bonwick 			 * A block of zeros may compress to a hole, but the
1698b24ab676SJeff Bonwick 			 * block size still needs to be known for replay.
1699b24ab676SJeff Bonwick 			 */
1700b24ab676SJeff Bonwick 			BP_SET_LSIZE(bp, db->db_size);
17015d7b4d43SMatthew Ahrens 		} else if (!BP_IS_EMBEDDED(bp)) {
1702b24ab676SJeff Bonwick 			ASSERT(BP_GET_LEVEL(bp) == 0);
1703b24ab676SJeff Bonwick 			bp->blk_fill = 1;
1704b24ab676SJeff Bonwick 		}
1705e14bb325SJeff Bonwick 	}
1706e14bb325SJeff Bonwick }
1707e14bb325SJeff Bonwick 
1708b24ab676SJeff Bonwick static void
1709b24ab676SJeff Bonwick dmu_sync_late_arrival_ready(zio_t *zio)
1710b24ab676SJeff Bonwick {
1711b24ab676SJeff Bonwick 	dmu_sync_ready(zio, NULL, zio->io_private);
1712b24ab676SJeff Bonwick }
1713b24ab676SJeff Bonwick 
1714c5c6ffa0Smaybee /* ARGSUSED */
1715c5c6ffa0Smaybee static void
1716c5c6ffa0Smaybee dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1717c5c6ffa0Smaybee {
1718b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa = varg;
1719b24ab676SJeff Bonwick 	dbuf_dirty_record_t *dr = dsa->dsa_dr;
1720c717a561Smaybee 	dmu_buf_impl_t *db = dr->dr_dbuf;
1721cab3a55eSPrakash Surya 	zgd_t *zgd = dsa->dsa_zgd;
1722cab3a55eSPrakash Surya 
1723cab3a55eSPrakash Surya 	/*
1724cab3a55eSPrakash Surya 	 * Record the vdev(s) backing this blkptr so they can be flushed after
1725cab3a55eSPrakash Surya 	 * the writes for the lwb have completed.
1726cab3a55eSPrakash Surya 	 */
1727cab3a55eSPrakash Surya 	if (zio->io_error == 0) {
1728cab3a55eSPrakash Surya 		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1729cab3a55eSPrakash Surya 	}
1730c5c6ffa0Smaybee 
1731b50a0fe0SNeil Perrin 	mutex_enter(&db->db_mtx);
1732b50a0fe0SNeil Perrin 	ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1733b24ab676SJeff Bonwick 	if (zio->io_error == 0) {
173480901aeaSGeorge Wilson 		dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
173580901aeaSGeorge Wilson 		if (dr->dt.dl.dr_nopwrite) {
173680901aeaSGeorge Wilson 			blkptr_t *bp = zio->io_bp;
173780901aeaSGeorge Wilson 			blkptr_t *bp_orig = &zio->io_bp_orig;
173880901aeaSGeorge Wilson 			uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
173980901aeaSGeorge Wilson 
174080901aeaSGeorge Wilson 			ASSERT(BP_EQUAL(bp, bp_orig));
1741b7edcb94SMatthew Ahrens 			VERIFY(BP_EQUAL(bp, db->db_blkptr));
174280901aeaSGeorge Wilson 			ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
174345818ee1SMatthew Ahrens 			ASSERT(zio_checksum_table[chksum].ci_flags &
174445818ee1SMatthew Ahrens 			    ZCHECKSUM_FLAG_NOPWRITE);
174580901aeaSGeorge Wilson 		}
1746b24ab676SJeff Bonwick 		dr->dt.dl.dr_overridden_by = *zio->io_bp;
1747b24ab676SJeff Bonwick 		dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1748b24ab676SJeff Bonwick 		dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
174970163ac5SPrakash Surya 
175070163ac5SPrakash Surya 		/*
175170163ac5SPrakash Surya 		 * Old style holes are filled with all zeros, whereas
175270163ac5SPrakash Surya 		 * new-style holes maintain their lsize, type, level,
175370163ac5SPrakash Surya 		 * and birth time (see zio_write_compress). While we
175470163ac5SPrakash Surya 		 * need to reset the BP_SET_LSIZE() call that happened
175570163ac5SPrakash Surya 		 * in dmu_sync_ready for old style holes, we do *not*
175670163ac5SPrakash Surya 		 * want to wipe out the information contained in new
175770163ac5SPrakash Surya 		 * style holes. Thus, only zero out the block pointer if
175870163ac5SPrakash Surya 		 * it's an old style hole.
175970163ac5SPrakash Surya 		 */
176070163ac5SPrakash Surya 		if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
176170163ac5SPrakash Surya 		    dr->dt.dl.dr_overridden_by.blk_birth == 0)
1762b24ab676SJeff Bonwick 			BP_ZERO(&dr->dt.dl.dr_overridden_by);
1763b24ab676SJeff Bonwick 	} else {
1764b24ab676SJeff Bonwick 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1765b24ab676SJeff Bonwick 	}
1766c5c6ffa0Smaybee 	cv_broadcast(&db->db_changed);
1767b50a0fe0SNeil Perrin 	mutex_exit(&db->db_mtx);
1768b50a0fe0SNeil Perrin 
1769b24ab676SJeff Bonwick 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1770b24ab676SJeff Bonwick 
1771b24ab676SJeff Bonwick 	kmem_free(dsa, sizeof (*dsa));
1772b24ab676SJeff Bonwick }
1773b24ab676SJeff Bonwick 
1774b24ab676SJeff Bonwick static void
1775b24ab676SJeff Bonwick dmu_sync_late_arrival_done(zio_t *zio)
1776b24ab676SJeff Bonwick {
1777b24ab676SJeff Bonwick 	blkptr_t *bp = zio->io_bp;
1778b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa = zio->io_private;
177980901aeaSGeorge Wilson 	blkptr_t *bp_orig = &zio->io_bp_orig;
1780cab3a55eSPrakash Surya 	zgd_t *zgd = dsa->dsa_zgd;
1781b24ab676SJeff Bonwick 
1782cab3a55eSPrakash Surya 	if (zio->io_error == 0) {
1783cab3a55eSPrakash Surya 		/*
1784cab3a55eSPrakash Surya 		 * Record the vdev(s) backing this blkptr so they can be
1785cab3a55eSPrakash Surya 		 * flushed after the writes for the lwb have completed.
1786cab3a55eSPrakash Surya 		 */
1787cab3a55eSPrakash Surya 		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1788cab3a55eSPrakash Surya 
1789cab3a55eSPrakash Surya 		if (!BP_IS_HOLE(bp)) {
1790cab3a55eSPrakash Surya 			ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
1791cab3a55eSPrakash Surya 			ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1792cab3a55eSPrakash Surya 			ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1793cab3a55eSPrakash Surya 			ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1794cab3a55eSPrakash Surya 			zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1795cab3a55eSPrakash Surya 		}
1796b24ab676SJeff Bonwick 	}
1797b24ab676SJeff Bonwick 
1798b24ab676SJeff Bonwick 	dmu_tx_commit(dsa->dsa_tx);
1799b24ab676SJeff Bonwick 
1800b24ab676SJeff Bonwick 	dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1801b24ab676SJeff Bonwick 
1802770499e1SDan Kimmel 	abd_put(zio->io_abd);
1803b24ab676SJeff Bonwick 	kmem_free(dsa, sizeof (*dsa));
1804b24ab676SJeff Bonwick }
1805b24ab676SJeff Bonwick 
1806b24ab676SJeff Bonwick static int
1807b24ab676SJeff Bonwick dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
18087802d7bfSMatthew Ahrens     zio_prop_t *zp, zbookmark_phys_t *zb)
1809b24ab676SJeff Bonwick {
1810b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa;
1811b24ab676SJeff Bonwick 	dmu_tx_t *tx;
1812b24ab676SJeff Bonwick 
1813b24ab676SJeff Bonwick 	tx = dmu_tx_create(os);
1814b24ab676SJeff Bonwick 	dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
18156e1f5caaSNeil Perrin 	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1816b24ab676SJeff Bonwick 		dmu_tx_abort(tx);
1817be6fd75aSMatthew Ahrens 		/* Make zl_get_data do txg_wait_synced() */
1818be6fd75aSMatthew Ahrens 		return (SET_ERROR(EIO));
1819b24ab676SJeff Bonwick 	}
1820b24ab676SJeff Bonwick 
18211271e4b1SPrakash Surya 	/*
18221271e4b1SPrakash Surya 	 * In order to prevent the zgd's lwb from being free'd prior to
18231271e4b1SPrakash Surya 	 * dmu_sync_late_arrival_done() being called, we have to ensure
18241271e4b1SPrakash Surya 	 * the lwb's "max txg" takes this tx's txg into account.
18251271e4b1SPrakash Surya 	 */
18261271e4b1SPrakash Surya 	zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
18271271e4b1SPrakash Surya 
1828b24ab676SJeff Bonwick 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1829b24ab676SJeff Bonwick 	dsa->dsa_dr = NULL;
1830b24ab676SJeff Bonwick 	dsa->dsa_done = done;
1831b24ab676SJeff Bonwick 	dsa->dsa_zgd = zgd;
1832b24ab676SJeff Bonwick 	dsa->dsa_tx = tx;
1833c717a561Smaybee 
1834b7edcb94SMatthew Ahrens 	/*
1835b7edcb94SMatthew Ahrens 	 * Since we are currently syncing this txg, it's nontrivial to
1836b7edcb94SMatthew Ahrens 	 * determine what BP to nopwrite against, so we disable nopwrite.
1837b7edcb94SMatthew Ahrens 	 *
1838b7edcb94SMatthew Ahrens 	 * When syncing, the db_blkptr is initially the BP of the previous
1839b7edcb94SMatthew Ahrens 	 * txg.  We can not nopwrite against it because it will be changed
1840b7edcb94SMatthew Ahrens 	 * (this is similar to the non-late-arrival case where the dbuf is
1841b7edcb94SMatthew Ahrens 	 * dirty in a future txg).
1842b7edcb94SMatthew Ahrens 	 *
1843b7edcb94SMatthew Ahrens 	 * Then dbuf_write_ready() sets db_blkptr to the location we will write.
1844b7edcb94SMatthew Ahrens 	 * We can not nopwrite against it because although the BP will not
1845b7edcb94SMatthew Ahrens 	 * (typically) be changed, the data has not yet been persisted to this
1846b7edcb94SMatthew Ahrens 	 * location.
1847b7edcb94SMatthew Ahrens 	 *
1848b7edcb94SMatthew Ahrens 	 * Finally, when dbuf_write_done() is called, it is theoretically
1849b7edcb94SMatthew Ahrens 	 * possible to always nopwrite, because the data that was written in
1850b7edcb94SMatthew Ahrens 	 * this txg is the same data that we are trying to write.  However we
1851b7edcb94SMatthew Ahrens 	 * would need to check that this dbuf is not dirty in any future
1852b7edcb94SMatthew Ahrens 	 * txg's (as we do in the normal dmu_sync() path). For simplicity, we
1853b7edcb94SMatthew Ahrens 	 * don't nopwrite in this case.
1854b7edcb94SMatthew Ahrens 	 */
1855b7edcb94SMatthew Ahrens 	zp->zp_nopwrite = B_FALSE;
1856b7edcb94SMatthew Ahrens 
18575602294fSDan Kimmel 	zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1858770499e1SDan Kimmel 	    abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
1859770499e1SDan Kimmel 	    zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
1860770499e1SDan Kimmel 	    dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
1861770499e1SDan Kimmel 	    dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1862b24ab676SJeff Bonwick 
1863b24ab676SJeff Bonwick 	return (0);
1864c5c6ffa0Smaybee }
1865c5c6ffa0Smaybee 
1866fa9e4066Sahrens /*
1867c5c6ffa0Smaybee  * Intent log support: sync the block associated with db to disk.
1868c5c6ffa0Smaybee  * N.B. and XXX: the caller is responsible for making sure that the
1869c5c6ffa0Smaybee  * data isn't changing while dmu_sync() is writing it.
1870fa9e4066Sahrens  *
1871fa9e4066Sahrens  * Return values:
1872fa9e4066Sahrens  *
187380901aeaSGeorge Wilson  *	EEXIST: this txg has already been synced, so there's nothing to do.
1874fa9e4066Sahrens  *		The caller should not log the write.
1875fa9e4066Sahrens  *
1876fa9e4066Sahrens  *	ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1877fa9e4066Sahrens  *		The caller should not log the write.
1878fa9e4066Sahrens  *
1879c5c6ffa0Smaybee  *	EALREADY: this block is already in the process of being synced.
1880c5c6ffa0Smaybee  *		The caller should track its progress (somehow).
1881fa9e4066Sahrens  *
1882b24ab676SJeff Bonwick  *	EIO: could not do the I/O.
1883b24ab676SJeff Bonwick  *		The caller should do a txg_wait_synced().
1884fa9e4066Sahrens  *
1885b24ab676SJeff Bonwick  *	0: the I/O has been initiated.
1886b24ab676SJeff Bonwick  *		The caller should log this blkptr in the done callback.
1887b24ab676SJeff Bonwick  *		It is possible that the I/O will fail, in which case
1888b24ab676SJeff Bonwick  *		the error will be reported to the done callback and
1889b24ab676SJeff Bonwick  *		propagated to pio from zio_done().
1890fa9e4066Sahrens  */
1891fa9e4066Sahrens int
1892b24ab676SJeff Bonwick dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1893fa9e4066Sahrens {
1894b24ab676SJeff Bonwick 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1895503ad85cSMatthew Ahrens 	objset_t *os = db->db_objset;
1896b24ab676SJeff Bonwick 	dsl_dataset_t *ds = os->os_dsl_dataset;
1897c717a561Smaybee 	dbuf_dirty_record_t *dr;
1898b24ab676SJeff Bonwick 	dmu_sync_arg_t *dsa;
18997802d7bfSMatthew Ahrens 	zbookmark_phys_t zb;
1900b24ab676SJeff Bonwick 	zio_prop_t zp;
1901744947dcSTom Erickson 	dnode_t *dn;
1902fa9e4066Sahrens 
1903b24ab676SJeff Bonwick 	ASSERT(pio != NULL);
1904fa9e4066Sahrens 	ASSERT(txg != 0);
1905fa9e4066Sahrens 
1906b24ab676SJeff Bonwick 	SET_BOOKMARK(&zb, ds->ds_object,
1907b24ab676SJeff Bonwick 	    db->db.db_object, db->db_level, db->db_blkid);
1908b24ab676SJeff Bonwick 
1909744947dcSTom Erickson 	DB_DNODE_ENTER(db);
1910744947dcSTom Erickson 	dn = DB_DNODE(db);
1911adaec86aSMatthew Ahrens 	dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1912744947dcSTom Erickson 	DB_DNODE_EXIT(db);
1913fa9e4066Sahrens 
1914ea8dc4b6Seschrock 	/*
1915b24ab676SJeff Bonwick 	 * If we're frozen (running ziltest), we always need to generate a bp.
1916ea8dc4b6Seschrock 	 */
1917b24ab676SJeff Bonwick 	if (txg > spa_freeze_txg(os->os_spa))
1918b24ab676SJeff Bonwick 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1919ea8dc4b6Seschrock 
1920fa9e4066Sahrens 	/*
1921b24ab676SJeff Bonwick 	 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1922b24ab676SJeff Bonwick 	 * and us.  If we determine that this txg is not yet syncing,
1923b24ab676SJeff Bonwick 	 * but it begins to sync a moment later, that's OK because the
1924b24ab676SJeff Bonwick 	 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1925fa9e4066Sahrens 	 */
1926b24ab676SJeff Bonwick 	mutex_enter(&db->db_mtx);
1927b24ab676SJeff Bonwick 
1928b24ab676SJeff Bonwick 	if (txg <= spa_last_synced_txg(os->os_spa)) {
1929fa9e4066Sahrens 		/*
1930b24ab676SJeff Bonwick 		 * This txg has already synced.  There's nothing to do.
1931fa9e4066Sahrens 		 */
1932b24ab676SJeff Bonwick 		mutex_exit(&db->db_mtx);
1933be6fd75aSMatthew Ahrens 		return (SET_ERROR(EEXIST));
1934fa9e4066Sahrens 	}
1935fa9e4066Sahrens 
1936b24ab676SJeff Bonwick 	if (txg <= spa_syncing_txg(os->os_spa)) {
1937b24ab676SJeff Bonwick 		/*
1938b24ab676SJeff Bonwick 		 * This txg is currently syncing, so we can't mess with
1939b24ab676SJeff Bonwick 		 * the dirty record anymore; just write a new log block.
1940b24ab676SJeff Bonwick 		 */
1941b24ab676SJeff Bonwick 		mutex_exit(&db->db_mtx);
1942b24ab676SJeff Bonwick 		return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1943fa9e4066Sahrens 	}
1944fa9e4066Sahrens 
1945c717a561Smaybee 	dr = db->db_last_dirty;
1946b24ab676SJeff Bonwick 	while (dr && dr->dr_txg != txg)
1947c717a561Smaybee 		dr = dr->dr_next;
1948b24ab676SJeff Bonwick 
1949b24ab676SJeff Bonwick 	if (dr == NULL) {
1950c5c6ffa0Smaybee 		/*
1951b24ab676SJeff Bonwick 		 * There's no dr for this dbuf, so it must have been freed.
1952c5c6ffa0Smaybee 		 * There's no need to log writes to freed blocks, so we're done.
1953c5c6ffa0Smaybee 		 */
1954c5c6ffa0Smaybee 		mutex_exit(&db->db_mtx);
1955be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENOENT));
1956c5c6ffa0Smaybee 	}
1957c5c6ffa0Smaybee 
195880901aeaSGeorge Wilson 	ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
195980901aeaSGeorge Wilson 
1960b7edcb94SMatthew Ahrens 	if (db->db_blkptr != NULL) {
1961b7edcb94SMatthew Ahrens 		/*
1962b7edcb94SMatthew Ahrens 		 * We need to fill in zgd_bp with the current blkptr so that
1963b7edcb94SMatthew Ahrens 		 * the nopwrite code can check if we're writing the same
1964b7edcb94SMatthew Ahrens 		 * data that's already on disk.  We can only nopwrite if we
1965b7edcb94SMatthew Ahrens 		 * are sure that after making the copy, db_blkptr will not
1966b7edcb94SMatthew Ahrens 		 * change until our i/o completes.  We ensure this by
1967b7edcb94SMatthew Ahrens 		 * holding the db_mtx, and only allowing nopwrite if the
1968b7edcb94SMatthew Ahrens 		 * block is not already dirty (see below).  This is verified
1969b7edcb94SMatthew Ahrens 		 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
1970b7edcb94SMatthew Ahrens 		 * not changed.
1971b7edcb94SMatthew Ahrens 		 */
1972b7edcb94SMatthew Ahrens 		*zgd->zgd_bp = *db->db_blkptr;
1973b7edcb94SMatthew Ahrens 	}
1974b7edcb94SMatthew Ahrens 
197580901aeaSGeorge Wilson 	/*
197634e8acefSMatthew Ahrens 	 * Assume the on-disk data is X, the current syncing data (in
197734e8acefSMatthew Ahrens 	 * txg - 1) is Y, and the current in-memory data is Z (currently
197834e8acefSMatthew Ahrens 	 * in dmu_sync).
197934e8acefSMatthew Ahrens 	 *
198034e8acefSMatthew Ahrens 	 * We usually want to perform a nopwrite if X and Z are the
198134e8acefSMatthew Ahrens 	 * same.  However, if Y is different (i.e. the BP is going to
198234e8acefSMatthew Ahrens 	 * change before this write takes effect), then a nopwrite will
198334e8acefSMatthew Ahrens 	 * be incorrect - we would override with X, which could have
198434e8acefSMatthew Ahrens 	 * been freed when Y was written.
198534e8acefSMatthew Ahrens 	 *
198634e8acefSMatthew Ahrens 	 * (Note that this is not a concern when we are nop-writing from
198734e8acefSMatthew Ahrens 	 * syncing context, because X and Y must be identical, because
198834e8acefSMatthew Ahrens 	 * all previous txgs have been synced.)
198934e8acefSMatthew Ahrens 	 *
199034e8acefSMatthew Ahrens 	 * Therefore, we disable nopwrite if the current BP could change
199134e8acefSMatthew Ahrens 	 * before this TXG.  There are two ways it could change: by
199234e8acefSMatthew Ahrens 	 * being dirty (dr_next is non-NULL), or by being freed
199334e8acefSMatthew Ahrens 	 * (dnode_block_freed()).  This behavior is verified by
199434e8acefSMatthew Ahrens 	 * zio_done(), which VERIFYs that the override BP is identical
199534e8acefSMatthew Ahrens 	 * to the on-disk BP.
199680901aeaSGeorge Wilson 	 */
199734e8acefSMatthew Ahrens 	DB_DNODE_ENTER(db);
199834e8acefSMatthew Ahrens 	dn = DB_DNODE(db);
199934e8acefSMatthew Ahrens 	if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
200080901aeaSGeorge Wilson 		zp.zp_nopwrite = B_FALSE;
200134e8acefSMatthew Ahrens 	DB_DNODE_EXIT(db);
200280901aeaSGeorge Wilson 
2003c717a561Smaybee 	ASSERT(dr->dr_txg == txg);
2004b24ab676SJeff Bonwick 	if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
2005b24ab676SJeff Bonwick 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2006c717a561Smaybee 		/*
2007b24ab676SJeff Bonwick 		 * We have already issued a sync write for this buffer,
2008b24ab676SJeff Bonwick 		 * or this buffer has already been synced.  It could not
2009c717a561Smaybee 		 * have been dirtied since, or we would have cleared the state.
2010c717a561Smaybee 		 */
2011c717a561Smaybee 		mutex_exit(&db->db_mtx);
2012be6fd75aSMatthew Ahrens 		return (SET_ERROR(EALREADY));
2013c717a561Smaybee 	}
2014c717a561Smaybee 
2015b24ab676SJeff Bonwick 	ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2016c717a561Smaybee 	dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
2017fa9e4066Sahrens 	mutex_exit(&db->db_mtx);
2018e14bb325SJeff Bonwick 
2019b24ab676SJeff Bonwick 	dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
2020b24ab676SJeff Bonwick 	dsa->dsa_dr = dr;
2021b24ab676SJeff Bonwick 	dsa->dsa_done = done;
2022b24ab676SJeff Bonwick 	dsa->dsa_zgd = zgd;
2023b24ab676SJeff Bonwick 	dsa->dsa_tx = NULL;
2024e14bb325SJeff Bonwick 
2025b24ab676SJeff Bonwick 	zio_nowait(arc_write(pio, os->os_spa, txg,
2026b7edcb94SMatthew Ahrens 	    zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
2027dcbf3bd6SGeorge Wilson 	    &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
20288df0bcf0SPaul Dagnelie 	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
2029e14bb325SJeff Bonwick 
2030b24ab676SJeff Bonwick 	return (0);
2031fa9e4066Sahrens }
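
/*
 * Illustrative sketch of how a ZIL get_data callback might act on the
 * dmu_sync() return values documented above.  'zgd', 'done', and the use of
 * txg_wait_synced()/dmu_objset_pool() are assumptions for the example.
 *
 *	err = dmu_sync(zio, txg, done, zgd);
 *	switch (err) {
 *	case 0:
 *		break;		(the done callback logs the resulting blkptr)
 *	case EEXIST:
 *	case ENOENT:
 *		break;		(nothing to log)
 *	case EALREADY:
 *		break;		(already being synced; track its progress)
 *	case EIO:
 *		txg_wait_synced(dmu_objset_pool(os), txg);
 *		break;
 *	}
 */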
2032fa9e4066Sahrens 
2033fa9e4066Sahrens int
2034fa9e4066Sahrens dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
20359a686fbcSPaul Dagnelie     dmu_tx_t *tx)
2036fa9e4066Sahrens {
2037ea8dc4b6Seschrock 	dnode_t *dn;
2038ea8dc4b6Seschrock 	int err;
2039ea8dc4b6Seschrock 
2040503ad85cSMatthew Ahrens 	err = dnode_hold(os, object, FTAG, &dn);
2041ea8dc4b6Seschrock 	if (err)
2042ea8dc4b6Seschrock 		return (err);
2043ea8dc4b6Seschrock 	err = dnode_set_blksz(dn, size, ibs, tx);
2044fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2045fa9e4066Sahrens 	return (err);
2046fa9e4066Sahrens }
2047fa9e4066Sahrens 
2048fa9e4066Sahrens void
2049fa9e4066Sahrens dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
20509a686fbcSPaul Dagnelie     dmu_tx_t *tx)
2051fa9e4066Sahrens {
2052ea8dc4b6Seschrock 	dnode_t *dn;
2053ea8dc4b6Seschrock 
20545d7b4d43SMatthew Ahrens 	/*
20555d7b4d43SMatthew Ahrens 	 * Send streams include each object's checksum function.  This
20565d7b4d43SMatthew Ahrens 	 * check ensures that the receiving system can understand the
20575d7b4d43SMatthew Ahrens 	 * checksum function transmitted.
20585d7b4d43SMatthew Ahrens 	 */
20595d7b4d43SMatthew Ahrens 	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
20605d7b4d43SMatthew Ahrens 
20615d7b4d43SMatthew Ahrens 	VERIFY0(dnode_hold(os, object, FTAG, &dn));
20625d7b4d43SMatthew Ahrens 	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
2063fa9e4066Sahrens 	dn->dn_checksum = checksum;
2064fa9e4066Sahrens 	dnode_setdirty(dn, tx);
2065fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2066fa9e4066Sahrens }
2067fa9e4066Sahrens 
2068fa9e4066Sahrens void
2069fa9e4066Sahrens dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
20709a686fbcSPaul Dagnelie     dmu_tx_t *tx)
2071fa9e4066Sahrens {
2072ea8dc4b6Seschrock 	dnode_t *dn;
2073ea8dc4b6Seschrock 
20745d7b4d43SMatthew Ahrens 	/*
20755d7b4d43SMatthew Ahrens 	 * Send streams include each object's compression function.  This
20765d7b4d43SMatthew Ahrens 	 * check ensures that the receiving system can understand the
20775d7b4d43SMatthew Ahrens 	 * compression function transmitted.
20785d7b4d43SMatthew Ahrens 	 */
20795d7b4d43SMatthew Ahrens 	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
20805d7b4d43SMatthew Ahrens 
20815d7b4d43SMatthew Ahrens 	VERIFY0(dnode_hold(os, object, FTAG, &dn));
2082fa9e4066Sahrens 	dn->dn_compress = compress;
2083fa9e4066Sahrens 	dnode_setdirty(dn, tx);
2084fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2085fa9e4066Sahrens }
2086fa9e4066Sahrens 
2087b24ab676SJeff Bonwick int zfs_mdcomp_disable = 0;
2088b24ab676SJeff Bonwick 
2089edf345e6SMatthew Ahrens /*
2090edf345e6SMatthew Ahrens  * When the "redundant_metadata" property is set to "most", only indirect
2091edf345e6SMatthew Ahrens  * blocks of this level and higher will have an additional ditto block.
2092edf345e6SMatthew Ahrens  */
2093edf345e6SMatthew Ahrens int zfs_redundant_metadata_most_ditto_level = 2;
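
/*
 * Worked example, assuming the default of 2 above: for a plain file object
 * with dn_nlevels == 4 and redundant_metadata=most, dmu_write_policy() below
 * adds a ditto copy only for the L2 and L3 indirect blocks; L1 indirects and
 * L0 data blocks keep the dataset's normal copies setting.
 */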
2094edf345e6SMatthew Ahrens 
2095b24ab676SJeff Bonwick void
2096adaec86aSMatthew Ahrens dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
2097b24ab676SJeff Bonwick {
2098b24ab676SJeff Bonwick 	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
2099ad135b5dSChristopher Siden 	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
21001d8ccc7bSMark Shellenbaum 	    (wp & WP_SPILL));
2101b24ab676SJeff Bonwick 	enum zio_checksum checksum = os->os_checksum;
2102b24ab676SJeff Bonwick 	enum zio_compress compress = os->os_compress;
2103b24ab676SJeff Bonwick 	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
21047540df39SGeorge Wilson 	boolean_t dedup = B_FALSE;
21057540df39SGeorge Wilson 	boolean_t nopwrite = B_FALSE;
2106b24ab676SJeff Bonwick 	boolean_t dedup_verify = os->os_dedup_verify;
2107b24ab676SJeff Bonwick 	int copies = os->os_copies;
2108b24ab676SJeff Bonwick 
2109b24ab676SJeff Bonwick 	/*
211080901aeaSGeorge Wilson 	 * We maintain different write policies for each of the following
211180901aeaSGeorge Wilson 	 * types of data:
211280901aeaSGeorge Wilson 	 *	 1. metadata
211380901aeaSGeorge Wilson 	 *	 2. preallocated blocks (i.e. level-0 blocks of a dump device)
211480901aeaSGeorge Wilson 	 *	 3. all other level 0 blocks
2115b24ab676SJeff Bonwick 	 */
2116b24ab676SJeff Bonwick 	if (ismd) {
2117b8289d24SDaniil Lunev 		if (zfs_mdcomp_disable) {
2118b8289d24SDaniil Lunev 			compress = ZIO_COMPRESS_EMPTY;
2119b8289d24SDaniil Lunev 		} else {
2120db1741f5SJustin T. Gibbs 			/*
2121db1741f5SJustin T. Gibbs 			 * XXX -- we should design a compression algorithm
2122db1741f5SJustin T. Gibbs 			 * that specializes in arrays of bps.
2123db1741f5SJustin T. Gibbs 			 */
2124db1741f5SJustin T. Gibbs 			compress = zio_compress_select(os->os_spa,
2125db1741f5SJustin T. Gibbs 			    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
2126b8289d24SDaniil Lunev 		}
212780901aeaSGeorge Wilson 
2128b24ab676SJeff Bonwick 		/*
2129b24ab676SJeff Bonwick 		 * Metadata always gets checksummed.  If the data
2130b24ab676SJeff Bonwick 		 * checksum is multi-bit correctable, and it's not a
2131b24ab676SJeff Bonwick 		 * ZBT-style checksum, then it's suitable for metadata
2132b24ab676SJeff Bonwick 		 * as well.  Otherwise, the metadata checksum defaults
2133b24ab676SJeff Bonwick 		 * to fletcher4.
2134b24ab676SJeff Bonwick 		 */
213545818ee1SMatthew Ahrens 		if (!(zio_checksum_table[checksum].ci_flags &
213645818ee1SMatthew Ahrens 		    ZCHECKSUM_FLAG_METADATA) ||
213745818ee1SMatthew Ahrens 		    (zio_checksum_table[checksum].ci_flags &
213845818ee1SMatthew Ahrens 		    ZCHECKSUM_FLAG_EMBEDDED))
2139b24ab676SJeff Bonwick 			checksum = ZIO_CHECKSUM_FLETCHER_4;
2140edf345e6SMatthew Ahrens 
2141edf345e6SMatthew Ahrens 		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
2142edf345e6SMatthew Ahrens 		    (os->os_redundant_metadata ==
2143edf345e6SMatthew Ahrens 		    ZFS_REDUNDANT_METADATA_MOST &&
2144edf345e6SMatthew Ahrens 		    (level >= zfs_redundant_metadata_most_ditto_level ||
2145edf345e6SMatthew Ahrens 		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
2146edf345e6SMatthew Ahrens 			copies++;
214780901aeaSGeorge Wilson 	} else if (wp & WP_NOFILL) {
214880901aeaSGeorge Wilson 		ASSERT(level == 0);
214980901aeaSGeorge Wilson 
2150b24ab676SJeff Bonwick 		/*
215180901aeaSGeorge Wilson 		 * If we're writing preallocated blocks, we aren't actually
215280901aeaSGeorge Wilson 		 * writing them, so don't set any policy properties.  These
215380901aeaSGeorge Wilson 		 * blocks are currently only used by an external subsystem
215480901aeaSGeorge Wilson 		 * outside of zfs (i.e. dump) and not written by the zio
215580901aeaSGeorge Wilson 		 * pipeline.
2156b24ab676SJeff Bonwick 		 */
215780901aeaSGeorge Wilson 		compress = ZIO_COMPRESS_OFF;
2158810e43b2SBill Pijewski 		checksum = ZIO_CHECKSUM_NOPARITY;
2159b24ab676SJeff Bonwick 	} else {
2160db1741f5SJustin T. Gibbs 		compress = zio_compress_select(os->os_spa, dn->dn_compress,
2161db1741f5SJustin T. Gibbs 		    compress);
2162b24ab676SJeff Bonwick 
216380901aeaSGeorge Wilson 		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
216480901aeaSGeorge Wilson 		    zio_checksum_select(dn->dn_checksum, checksum) :
216580901aeaSGeorge Wilson 		    dedup_checksum;
2166b24ab676SJeff Bonwick 
216780901aeaSGeorge Wilson 		/*
216880901aeaSGeorge Wilson 		 * Determine dedup setting.  If we are in dmu_sync(),
216980901aeaSGeorge Wilson 		 * we won't actually dedup now because that's all
217080901aeaSGeorge Wilson 		 * done in syncing context; but we do want to use the
217180901aeaSGeorge Wilson 		 * dedup checksum.  If the checksum is not strong
217280901aeaSGeorge Wilson 		 * enough to ensure unique signatures, force
217380901aeaSGeorge Wilson 		 * dedup_verify.
217480901aeaSGeorge Wilson 		 */
217580901aeaSGeorge Wilson 		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
217680901aeaSGeorge Wilson 			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
217745818ee1SMatthew Ahrens 			if (!(zio_checksum_table[checksum].ci_flags &
217845818ee1SMatthew Ahrens 			    ZCHECKSUM_FLAG_DEDUP))
217980901aeaSGeorge Wilson 				dedup_verify = B_TRUE;
218080901aeaSGeorge Wilson 		}
2181b24ab676SJeff Bonwick 
218280901aeaSGeorge Wilson 		/*
218345818ee1SMatthew Ahrens 		 * Enable nopwrite if we have a secure enough checksum
218445818ee1SMatthew Ahrens 		 * algorithm (see comment in zio_nop_write) and
218545818ee1SMatthew Ahrens 		 * compression is enabled.  We don't enable nopwrite if
218645818ee1SMatthew Ahrens 		 * dedup is enabled as the two features are mutually
218745818ee1SMatthew Ahrens 		 * exclusive.
218880901aeaSGeorge Wilson 		 */
218945818ee1SMatthew Ahrens 		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
219045818ee1SMatthew Ahrens 		    ZCHECKSUM_FLAG_NOPWRITE) &&
219180901aeaSGeorge Wilson 		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
2192b24ab676SJeff Bonwick 	}
2193b24ab676SJeff Bonwick 
2194b24ab676SJeff Bonwick 	zp->zp_checksum = checksum;
2195adaec86aSMatthew Ahrens 	zp->zp_compress = compress;
21965602294fSDan Kimmel 	ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
21975602294fSDan Kimmel 
21980a586ceaSMark Shellenbaum 	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2199b24ab676SJeff Bonwick 	zp->zp_level = level;
2200edf345e6SMatthew Ahrens 	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
2201b24ab676SJeff Bonwick 	zp->zp_dedup = dedup;
2202b24ab676SJeff Bonwick 	zp->zp_dedup_verify = dedup && dedup_verify;
220380901aeaSGeorge Wilson 	zp->zp_nopwrite = nopwrite;
2204b24ab676SJeff Bonwick }
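/*
 * Illustrative sketch (editorial, not part of the original source):
 * a write-path caller such as dbuf_write() in dbuf.c resolves a
 * block's policy before handing it to the ZIO pipeline, roughly:
 *
 *	zio_prop_t zp;
 *	int wp_flag = 0;
 *
 *	if (db->db_blkid == DMU_SPILL_BLKID)
 *		wp_flag = WP_SPILL;
 *	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
 *	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
 *
 * The resulting zp is then passed to zio_write(); the zio_write()
 * argument list is elided here.
 */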
2205b24ab676SJeff Bonwick 
2206fa9e4066Sahrens int
2207fa9e4066Sahrens dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
2208fa9e4066Sahrens {
2209fa9e4066Sahrens 	dnode_t *dn;
22102bcf0248SMax Grossman 	int err;
2211fa9e4066Sahrens 
2212fa9e4066Sahrens 	/*
2213fa9e4066Sahrens 	 * Sync any current changes before
2214fa9e4066Sahrens 	 * we go trundling through the block pointers.
2215fa9e4066Sahrens 	 */
22162bcf0248SMax Grossman 	err = dmu_object_wait_synced(os, object);
22172bcf0248SMax Grossman 	if (err) {
22182bcf0248SMax Grossman 		return (err);
2219fa9e4066Sahrens 	}
22202bcf0248SMax Grossman 
22212bcf0248SMax Grossman 	err = dnode_hold(os, object, FTAG, &dn);
22222bcf0248SMax Grossman 	if (err) {
22232bcf0248SMax Grossman 		return (err);
2224fa9e4066Sahrens 	}
2225fa9e4066Sahrens 
2226cdb0ab79Smaybee 	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
2227fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2228fa9e4066Sahrens 
2229fa9e4066Sahrens 	return (err);
2230fa9e4066Sahrens }
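/*
 * Illustrative sketch (editorial): this is the primitive behind
 * SEEK_HOLE/SEEK_DATA.  A caller along the lines of zfs_holey()
 * might do, approximately:
 *
 *	uint64_t off = start;
 *	error = dmu_offset_next(os, object, B_TRUE, &off);
 *	if (error == ESRCH)
 *		off = file_size;	(treat EOF as a virtual hole)
 *
 * ESRCH propagates from dnode_next_offset() when no matching hole
 * or data block exists at or beyond *off.
 */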
2231fa9e4066Sahrens 
22322bcf0248SMax Grossman /*
22332bcf0248SMax Grossman  * If the given ZFS object has any dirty dnodes, wait for
22342bcf0248SMax Grossman  * its dirty blocks to be synced to disk.  This ensures the
22352bcf0248SMax Grossman  * on-disk DMU object info is up to date.  A more efficient
22362bcf0248SMax Grossman  * future version might find the dirty TXG with the maximum
22372bcf0248SMax Grossman  * ID and wait for only that TXG to be synced.
22382bcf0248SMax Grossman  */
22392bcf0248SMax Grossman int
22409a686fbcSPaul Dagnelie dmu_object_wait_synced(objset_t *os, uint64_t object)
22419a686fbcSPaul Dagnelie {
22422bcf0248SMax Grossman 	dnode_t *dn;
22432bcf0248SMax Grossman 	int error, i;
22442bcf0248SMax Grossman 
22452bcf0248SMax Grossman 	error = dnode_hold(os, object, FTAG, &dn);
22462bcf0248SMax Grossman 	if (error) {
22472bcf0248SMax Grossman 		return (error);
22482bcf0248SMax Grossman 	}
22492bcf0248SMax Grossman 
22502bcf0248SMax Grossman 	for (i = 0; i < TXG_SIZE; i++) {
22512bcf0248SMax Grossman 		if (list_link_active(&dn->dn_dirty_link[i])) {
22522bcf0248SMax Grossman 			break;
22532bcf0248SMax Grossman 		}
22542bcf0248SMax Grossman 	}
22552bcf0248SMax Grossman 	dnode_rele(dn, FTAG);
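	/*
	 * If any per-txg dirty link is still active, the dnode has
	 * unsynced changes; passing txg 0 to txg_wait_synced() waits
	 * for all currently outstanding txgs to sync.
	 */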
22562bcf0248SMax Grossman 	if (i != TXG_SIZE) {
22572bcf0248SMax Grossman 		txg_wait_synced(dmu_objset_pool(os), 0);
22582bcf0248SMax Grossman 	}
22592bcf0248SMax Grossman 
22602bcf0248SMax Grossman 	return (0);
22612bcf0248SMax Grossman }
22622bcf0248SMax Grossman 
2263fa9e4066Sahrens void
2264fa9e4066Sahrens dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2265fa9e4066Sahrens {
2266b24ab676SJeff Bonwick 	dnode_phys_t *dnp;
2267b24ab676SJeff Bonwick 
2268fa9e4066Sahrens 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2269fa9e4066Sahrens 	mutex_enter(&dn->dn_mtx);
2270fa9e4066Sahrens 
2271b24ab676SJeff Bonwick 	dnp = dn->dn_phys;
2272b24ab676SJeff Bonwick 
2273fa9e4066Sahrens 	doi->doi_data_block_size = dn->dn_datablksz;
2274fa9e4066Sahrens 	doi->doi_metadata_block_size = dn->dn_indblkshift ?
2275fa9e4066Sahrens 	    1ULL << dn->dn_indblkshift : 0;
2276b24ab676SJeff Bonwick 	doi->doi_type = dn->dn_type;
2277b24ab676SJeff Bonwick 	doi->doi_bonus_type = dn->dn_bonustype;
2278b24ab676SJeff Bonwick 	doi->doi_bonus_size = dn->dn_bonuslen;
227954811da5SToomas Soome 	doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
2280fa9e4066Sahrens 	doi->doi_indirection = dn->dn_nlevels;
2281fa9e4066Sahrens 	doi->doi_checksum = dn->dn_checksum;
2282fa9e4066Sahrens 	doi->doi_compress = dn->dn_compress;
2283e77d42eaSMatthew Ahrens 	doi->doi_nblkptr = dn->dn_nblkptr;
2284b24ab676SJeff Bonwick 	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
2285d0475637SMatthew Ahrens 	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
2286b24ab676SJeff Bonwick 	doi->doi_fill_count = 0;
2287b24ab676SJeff Bonwick 	for (int i = 0; i < dnp->dn_nblkptr; i++)
22885d7b4d43SMatthew Ahrens 		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
2289fa9e4066Sahrens 
2290fa9e4066Sahrens 	mutex_exit(&dn->dn_mtx);
2291fa9e4066Sahrens 	rw_exit(&dn->dn_struct_rwlock);
2292fa9e4066Sahrens }
2293fa9e4066Sahrens 
2294fa9e4066Sahrens /*
2295fa9e4066Sahrens  * Get information on a DMU object.
2296fa9e4066Sahrens  * If doi is NULL, this just checks whether the object exists.
2297fa9e4066Sahrens  */
2298fa9e4066Sahrens int
2299fa9e4066Sahrens dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
2300fa9e4066Sahrens {
2301ea8dc4b6Seschrock 	dnode_t *dn;
2302503ad85cSMatthew Ahrens 	int err = dnode_hold(os, object, FTAG, &dn);
2303fa9e4066Sahrens 
2304ea8dc4b6Seschrock 	if (err)
2305ea8dc4b6Seschrock 		return (err);
2306fa9e4066Sahrens 
2307fa9e4066Sahrens 	if (doi != NULL)
2308fa9e4066Sahrens 		dmu_object_info_from_dnode(dn, doi);
2309fa9e4066Sahrens 
2310fa9e4066Sahrens 	dnode_rele(dn, FTAG);
2311fa9e4066Sahrens 	return (0);
2312fa9e4066Sahrens }
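/*
 * Example (editorial sketch) of probing only for existence:
 *
 *	if (dmu_object_info(os, object, NULL) == 0)
 *		the object exists;
 *
 * ENOENT from dnode_hold() indicates that no such object is allocated.
 */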
2313fa9e4066Sahrens 
2314fa9e4066Sahrens /*
2315fa9e4066Sahrens  * As above, but faster; can be used when you have a held dbuf in hand.
2316fa9e4066Sahrens  */
2317fa9e4066Sahrens void
2318744947dcSTom Erickson dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
2319fa9e4066Sahrens {
2320744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2321744947dcSTom Erickson 
2322744947dcSTom Erickson 	DB_DNODE_ENTER(db);
2323744947dcSTom Erickson 	dmu_object_info_from_dnode(DB_DNODE(db), doi);
2324744947dcSTom Erickson 	DB_DNODE_EXIT(db);
2325fa9e4066Sahrens }
2326fa9e4066Sahrens 
2327fa9e4066Sahrens /*
2328fa9e4066Sahrens  * Faster still when you only care about the size.
2329fa9e4066Sahrens  * This is specifically optimized for zfs_getattr().
2330fa9e4066Sahrens  */
2331fa9e4066Sahrens void
2332744947dcSTom Erickson dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
2333744947dcSTom Erickson     u_longlong_t *nblk512)
2334fa9e4066Sahrens {
2335744947dcSTom Erickson 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2336744947dcSTom Erickson 	dnode_t *dn;
2337744947dcSTom Erickson 
2338744947dcSTom Erickson 	DB_DNODE_ENTER(db);
2339744947dcSTom Erickson 	dn = DB_DNODE(db);
2340fa9e4066Sahrens 
2341fa9e4066Sahrens 	*blksize = dn->dn_datablksz;
234254811da5SToomas Soome 	/* add in number of slots used for the dnode itself */
234399653d4eSeschrock 	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
234454811da5SToomas Soome 	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
234554811da5SToomas Soome 	DB_DNODE_EXIT(db);
234654811da5SToomas Soome }
234754811da5SToomas Soome 
234854811da5SToomas Soome void
234954811da5SToomas Soome dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
235054811da5SToomas Soome {
235154811da5SToomas Soome 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
235254811da5SToomas Soome 	dnode_t *dn;
235354811da5SToomas Soome 
235454811da5SToomas Soome 	DB_DNODE_ENTER(db);
235554811da5SToomas Soome 	dn = DB_DNODE(db);
235654811da5SToomas Soome 	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
2357744947dcSTom Erickson 	DB_DNODE_EXIT(db);
2358fa9e4066Sahrens }
2359fa9e4066Sahrens 
2360fa9e4066Sahrens void
2361fa9e4066Sahrens byteswap_uint64_array(void *vbuf, size_t size)
2362fa9e4066Sahrens {
2363fa9e4066Sahrens 	uint64_t *buf = vbuf;
2364fa9e4066Sahrens 	size_t count = size >> 3;
2365fa9e4066Sahrens 	int i;
2366fa9e4066Sahrens 
2367fa9e4066Sahrens 	ASSERT((size & 7) == 0);
2368fa9e4066Sahrens 
2369fa9e4066Sahrens 	for (i = 0; i < count; i++)
2370fa9e4066Sahrens 		buf[i] = BSWAP_64(buf[i]);
2371fa9e4066Sahrens }
2372fa9e4066Sahrens 
2373fa9e4066Sahrens void
2374fa9e4066Sahrens byteswap_uint32_array(void *vbuf, size_t size)
2375fa9e4066Sahrens {
2376fa9e4066Sahrens 	uint32_t *buf = vbuf;
2377fa9e4066Sahrens 	size_t count = size >> 2;
2378fa9e4066Sahrens 	int i;
2379fa9e4066Sahrens 
2380fa9e4066Sahrens 	ASSERT((size & 3) == 0);
2381fa9e4066Sahrens 
2382fa9e4066Sahrens 	for (i = 0; i < count; i++)
2383fa9e4066Sahrens 		buf[i] = BSWAP_32(buf[i]);
2384fa9e4066Sahrens }
2385fa9e4066Sahrens 
2386fa9e4066Sahrens void
2387fa9e4066Sahrens byteswap_uint16_array(void *vbuf, size_t size)
2388fa9e4066Sahrens {
2389fa9e4066Sahrens 	uint16_t *buf = vbuf;
2390fa9e4066Sahrens 	size_t count = size >> 1;
2391fa9e4066Sahrens 	int i;
2392fa9e4066Sahrens 
2393fa9e4066Sahrens 	ASSERT((size & 1) == 0);
2394fa9e4066Sahrens 
2395fa9e4066Sahrens 	for (i = 0; i < count; i++)
2396fa9e4066Sahrens 		buf[i] = BSWAP_16(buf[i]);
2397fa9e4066Sahrens }
2398fa9e4066Sahrens 
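/*
 * Byte arrays need no swapping; this no-op exists so that every entry
 * in the DMU object type table can supply a byteswap function.
 */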
2399fa9e4066Sahrens /* ARGSUSED */
2400fa9e4066Sahrens void
2401fa9e4066Sahrens byteswap_uint8_array(void *vbuf, size_t size)
2402fa9e4066Sahrens {
2403fa9e4066Sahrens }
2404fa9e4066Sahrens 
2405fa9e4066Sahrens void
2406fa9e4066Sahrens dmu_init(void)
2407fa9e4066Sahrens {
2408770499e1SDan Kimmel 	abd_init();
24093f9d6ad7SLin Ling 	zfs_dbgmsg_init();
2410744947dcSTom Erickson 	sa_cache_init();
2411744947dcSTom Erickson 	xuio_stat_init();
2412744947dcSTom Erickson 	dmu_objset_init();
2413fa9e4066Sahrens 	dnode_init();
24147cbf8b43SRich Morris 	zfetch_init();
2415fa94a07fSbrendan 	l2arc_init();
2416ce636f8bSMatthew Ahrens 	arc_init();
2417dcbf3bd6SGeorge Wilson 	dbuf_init();
2418fa9e4066Sahrens }
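/*
 * dmu_fini() below tears these subsystems down in (mostly) reverse
 * order; see the arc/l2arc ordering note there.
 */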
2419fa9e4066Sahrens 
2420fa9e4066Sahrens void
2421fa9e4066Sahrens dmu_fini(void)
2422fa9e4066Sahrens {
24233e30c24aSWill Andrews 	arc_fini(); /* arc depends on l2arc, so arc must go first */
2424ce636f8bSMatthew Ahrens 	l2arc_fini();
24257cbf8b43SRich Morris 	zfetch_fini();
2426fa9e4066Sahrens 	dbuf_fini();
2427744947dcSTom Erickson 	dnode_fini();
2428744947dcSTom Erickson 	dmu_objset_fini();
2429c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 	xuio_stat_fini();
24300a586ceaSMark Shellenbaum 	sa_cache_fini();
24313f9d6ad7SLin Ling 	zfs_dbgmsg_fini();
2432770499e1SDan Kimmel 	abd_fini();
2433fa9e4066Sahrens }
2434