/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2018 DilOS
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

static xuio_stats_t xuio_stats = {
        { "onloan_read_buf",    KSTAT_DATA_UINT64 },
        { "onloan_write_buf",   KSTAT_DATA_UINT64 },
        { "read_buf_copied",    KSTAT_DATA_UINT64 },
        { "read_buf_nocopy",    KSTAT_DATA_UINT64 },
        { "write_buf_copied",   KSTAT_DATA_UINT64 },
        { "write_buf_nocopy",   KSTAT_DATA_UINT64 }
};

#define XUIOSTAT_INCR(stat, val) \
        atomic_add_64(&xuio_stats.stat.value.ui64, (val))

#define XUIOSTAT_BUMP(stat)     XUIOSTAT_INCR(stat, 1)

/*
 * Enable/disable nopwrite feature.
 */
int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control percentage of dirtied blocks from frees in one TXG.
 * After this threshold is crossed, additional dirty blocks from frees
 * wait until the next TXG.
 * A value of zero will disable this throttle.
 */
uint32_t zfs_per_txg_dirty_frees_percent = 30;

/*
 * This can be used for testing, to ensure that certain actions happen
 * while in the middle of a remap (which might otherwise complete too
 * quickly).
 */
int zfs_object_remap_one_indirect_delay_ticks = 0;

/*
 * Limit the amount we can prefetch with one call to this amount.  This
 * helps to limit the amount of memory that can be used by prefetching.
 * Larger objects should be prefetched a bit at a time.
 */
uint64_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
        { DMU_BSWAP_UINT8,  TRUE,  FALSE, FALSE, "unallocated" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "object directory" },
        { DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "object array" },
        { DMU_BSWAP_UINT8,  TRUE,  FALSE, FALSE, "packed nvlist" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "packed nvlist size" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "bpobj" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "bpobj header" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "SPA space map header" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "SPA space map" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, TRUE,  "ZIL intent log" },
        { DMU_BSWAP_DNODE,  TRUE,  FALSE, TRUE,  "DMU dnode" },
        { DMU_BSWAP_OBJSET, TRUE,  TRUE,  FALSE, "DMU objset" },
        { DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "DSL directory" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL directory child map" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL dataset snap map" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL props" },
        { DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "DSL dataset" },
        { DMU_BSWAP_ZNODE,  TRUE,  FALSE, FALSE, "ZFS znode" },
        { DMU_BSWAP_OLDACL, TRUE,  FALSE, TRUE,  "ZFS V0 ACL" },
        { DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "ZFS plain file" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS directory" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "ZFS master node" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS delete queue" },
        { DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "zvol object" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "zvol prop" },
        { DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "other uint8[]" },
        { DMU_BSWAP_UINT64, FALSE, FALSE, TRUE,  "other uint64[]" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "other ZAP" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "persistent error log" },
        { DMU_BSWAP_UINT8,  TRUE,  FALSE, FALSE, "SPA history" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "SPA history offsets" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "Pool properties" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL permissions" },
        { DMU_BSWAP_ACL,    TRUE,  FALSE, TRUE,  "ZFS ACL" },
        { DMU_BSWAP_UINT8,  TRUE,  FALSE, TRUE,  "ZFS SYSACL" },
        { DMU_BSWAP_UINT8,  TRUE,  FALSE, TRUE,  "FUID table" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "FUID table size" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL dataset next clones" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "scan work queue" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS user/group/project used" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "ZFS user/group/proj quota" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "snapshot refcount tags" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "DDT ZAP algorithm" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "DDT statistics" },
        { DMU_BSWAP_UINT8,  TRUE,  FALSE, TRUE,  "System attributes" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "SA master node" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "SA attr registration" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, TRUE,  "SA attr layouts" },
        { DMU_BSWAP_ZAP,    TRUE,  FALSE, FALSE, "scan translations" },
        { DMU_BSWAP_UINT8,  FALSE, FALSE, TRUE,  "deduplicated block" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL deadlist map" },
        { DMU_BSWAP_UINT64, TRUE,  TRUE,  FALSE, "DSL deadlist map hdr" },
        { DMU_BSWAP_ZAP,    TRUE,  TRUE,  FALSE, "DSL dir clones" },
        { DMU_BSWAP_UINT64, TRUE,  FALSE, FALSE, "bpobj subobj" }
};

const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
        { byteswap_uint8_array,  "uint8" },
        { byteswap_uint16_array, "uint16" },
        { byteswap_uint32_array, "uint32" },
        { byteswap_uint64_array, "uint64" },
        { zap_byteswap,          "zap" },
        { dnode_buf_byteswap,    "dnode" },
        { dmu_objset_byteswap,   "objset" },
        { zfs_znode_byteswap,    "znode" },
        { zfs_oldacl_byteswap,   "oldacl" },
        { zfs_acl_byteswap,      "acl" }
};
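
/*
 * Hold the dbuf that covers "offset" without reading it in.  The caller
 * gets only a hold on the buffer; use dmu_buf_hold_by_dnode() or
 * dmu_buf_hold() below when the contents are also needed.  On failure,
 * *dbp is set to NULL and EIO is returned.
 */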
int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
        uint64_t blkid;
        dmu_buf_impl_t *db;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        blkid = dbuf_whichblock(dn, 0, offset);
        db = dbuf_hold(dn, blkid, tag);
        rw_exit(&dn->dn_struct_rwlock);

        if (db == NULL) {
                *dbp = NULL;
                return (SET_ERROR(EIO));
        }

        *dbp = &db->db;
        return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        uint64_t blkid;
        dmu_buf_impl_t *db;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        blkid = dbuf_whichblock(dn, 0, offset);
        db = dbuf_hold(dn, blkid, tag);
        rw_exit(&dn->dn_struct_rwlock);
        dnode_rele(dn, FTAG);

        if (db == NULL) {
                *dbp = NULL;
                return (SET_ERROR(EIO));
        }

        *dbp = &db->db;
        return (err);
}
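
/*
 * As above, but additionally read the buffer in, honoring the
 * DMU_READ_NO_PREFETCH and DMU_READ_NO_DECRYPT bits in "flags".  On
 * success the caller must eventually drop the hold with dmu_buf_rele()
 * using the same tag.
 */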
int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
        int err;
        int db_flags = DB_RF_CANFAIL;

        if (flags & DMU_READ_NO_PREFETCH)
                db_flags |= DB_RF_NOPREFETCH;
        if (flags & DMU_READ_NO_DECRYPT)
                db_flags |= DB_RF_NO_DECRYPT;

        err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
        if (err == 0) {
                dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
                err = dbuf_read(db, NULL, db_flags);
                if (err != 0) {
                        dbuf_rele(db, tag);
                        *dbp = NULL;
                }
        }

        return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
        int err;
        int db_flags = DB_RF_CANFAIL;

        if (flags & DMU_READ_NO_PREFETCH)
                db_flags |= DB_RF_NOPREFETCH;
        if (flags & DMU_READ_NO_DECRYPT)
                db_flags |= DB_RF_NO_DECRYPT;

        err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
        if (err == 0) {
                dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
                err = dbuf_read(db, NULL, db_flags);
                if (err != 0) {
                        dbuf_rele(db, tag);
                        *dbp = NULL;
                }
        }

        return (err);
}

int
dmu_bonus_max(void)
{
        return (DN_OLD_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        int error;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        if (dn->dn_bonus != db) {
                error = SET_ERROR(EINVAL);
        } else if (newsize < 0 || newsize > db_fake->db_size) {
                error = SET_ERROR(EINVAL);
        } else {
                dnode_setbonuslen(dn, newsize, tx);
                error = 0;
        }

        DB_DNODE_EXIT(db);
        return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        int error;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        if (!DMU_OT_IS_VALID(type)) {
                error = SET_ERROR(EINVAL);
        } else if (dn->dn_bonus != db) {
                error = SET_ERROR(EINVAL);
        } else {
                dnode_setbonus_type(dn, type, tx);
                error = 0;
        }

        DB_DNODE_EXIT(db);
        return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        dmu_object_type_t type;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        type = dn->dn_bonustype;
        DB_DNODE_EXIT(db);

        return (type);
}
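
/*
 * Remove the spill block from the given object: clear the spill dbuf and
 * then drop the dnode's spill pointer under the struct rwlock.  The
 * change must be covered by the caller's transaction.
 */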
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
        dnode_t *dn;
        int error;

        error = dnode_hold(os, object, FTAG, &dn);
        if (error != 0)
                return (error);
        dbuf_rm_spill(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        dnode_rm_spill(dn, tx);
        rw_exit(&dn->dn_struct_rwlock);
        dnode_rele(dn, FTAG);
        return (error);
}

/*
 * Lookup and hold the bonus buffer for the provided dnode.  If the dnode
 * has not yet been allocated a bonus dbuf, one will be allocated.
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
    uint32_t flags)
{
        dmu_buf_impl_t *db;
        int error;
        uint32_t db_flags = DB_RF_MUST_SUCCEED;

        if (flags & DMU_READ_NO_PREFETCH)
                db_flags |= DB_RF_NOPREFETCH;
        if (flags & DMU_READ_NO_DECRYPT)
                db_flags |= DB_RF_NO_DECRYPT;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_bonus == NULL) {
                rw_exit(&dn->dn_struct_rwlock);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                if (dn->dn_bonus == NULL)
                        dbuf_create_bonus(dn);
        }
        db = dn->dn_bonus;

        /* as long as the bonus buf is held, the dnode will be held */
        if (zfs_refcount_add(&db->db_holds, tag) == 1) {
                VERIFY(dnode_add_ref(dn, db));
                atomic_inc_32(&dn->dn_dbufs_count);
        }

        /*
         * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
         * hold and incrementing the dbuf count to ensure that dnode_move()
         * sees a dnode hold for every dbuf.
         */
        rw_exit(&dn->dn_struct_rwlock);

        error = dbuf_read(db, NULL, db_flags);
        if (error) {
                dnode_evict_bonus(dn);
                dbuf_rele(db, tag);
                *dbp = NULL;
                return (error);
        }

        *dbp = &db->db;
        return (0);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold_impl(objset_t *os, uint64_t object, void *tag, uint32_t flags,
    dmu_buf_t **dbp)
{
        dnode_t *dn;
        dmu_buf_impl_t *db;
        int error;
        uint32_t db_flags = DB_RF_MUST_SUCCEED;

        if (flags & DMU_READ_NO_PREFETCH)
                db_flags |= DB_RF_NOPREFETCH;
        if (flags & DMU_READ_NO_DECRYPT)
                db_flags |= DB_RF_NO_DECRYPT;

        error = dnode_hold(os, object, FTAG, &dn);
        if (error)
                return (error);

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_bonus == NULL) {
                rw_exit(&dn->dn_struct_rwlock);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                if (dn->dn_bonus == NULL)
                        dbuf_create_bonus(dn);
        }
        db = dn->dn_bonus;

        /* as long as the bonus buf is held, the dnode will be held */
        if (zfs_refcount_add(&db->db_holds, tag) == 1) {
                VERIFY(dnode_add_ref(dn, db));
                atomic_inc_32(&dn->dn_dbufs_count);
        }

        /*
         * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
         * hold and incrementing the dbuf count to ensure that dnode_move()
         * sees a dnode hold for every dbuf.
         */
        rw_exit(&dn->dn_struct_rwlock);

        dnode_rele(dn, FTAG);

        error = dbuf_read(db, NULL, db_flags);
        if (error) {
                dnode_evict_bonus(dn);
                dbuf_rele(db, tag);
                *dbp = NULL;
                return (error);
        }

        *dbp = &db->db;
        return (0);
}

int
dmu_bonus_hold(objset_t *os, uint64_t obj, void *tag, dmu_buf_t **dbp)
{
        return (dmu_bonus_hold_impl(os, obj, tag, DMU_READ_NO_PREFETCH, dbp));
}
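
/*
 * Example (illustrative only; error handling elided).  A typical consumer
 * holds the bonus buffer to read or update the per-object metadata kept
 * in the dnode:
 *
 *      dmu_buf_t *db;
 *      if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *              dmu_buf_will_dirty(db, tx);
 *              ... modify up to db->db_size bytes at db->db_data ...
 *              dmu_buf_rele(db, FTAG);
 *      }
 */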

/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
        dmu_buf_impl_t *db = NULL;
        int err;

        if ((flags & DB_RF_HAVESTRUCT) == 0)
                rw_enter(&dn->dn_struct_rwlock, RW_READER);

        db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

        if ((flags & DB_RF_HAVESTRUCT) == 0)
                rw_exit(&dn->dn_struct_rwlock);

        ASSERT(db != NULL);
        err = dbuf_read(db, NULL, flags);
        if (err == 0)
                *dbp = &db->db;
        else
                dbuf_rele(db, tag);
        return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
        dnode_t *dn;
        int err;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
                err = SET_ERROR(EINVAL);
        } else {
                rw_enter(&dn->dn_struct_rwlock, RW_READER);

                if (!dn->dn_have_spill) {
                        err = SET_ERROR(ENOENT);
                } else {
                        err = dmu_spill_hold_by_dnode(dn,
                            DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
                }

                rw_exit(&dn->dn_struct_rwlock);
        }

        DB_DNODE_EXIT(db);
        return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
    dmu_buf_t **dbp)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
        dnode_t *dn;
        int err;
        uint32_t db_flags = DB_RF_CANFAIL;

        if (flags & DMU_READ_NO_DECRYPT)
                db_flags |= DB_RF_NO_DECRYPT;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
        DB_DNODE_EXIT(db);

        return (err);
}
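
/*
 * Example (illustrative): system-attribute consumers hold the bonus
 * buffer first and then call dmu_spill_hold_existing(bonus, FTAG, &db)
 * when the attribute data has overflowed into a spill block; ENOENT from
 * that call simply means the object has no spill block.
 */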

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
        dmu_buf_t **dbp;
        uint64_t blkid, nblks, i;
        uint32_t dbuf_flags;
        int err;
        zio_t *zio;

        ASSERT(length <= DMU_MAX_ACCESS);

        /*
         * Note: We directly notify the prefetch code of this read, so that
         * we can tell it about the multi-block read.  dbuf_read() only knows
         * about the one block it is accessing.
         */
        dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
            DB_RF_NOPREFETCH;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_datablkshift) {
                int blkshift = dn->dn_datablkshift;
                nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
                    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
        } else {
                if (offset + length > dn->dn_datablksz) {
                        zfs_panic_recover("zfs: accessing past end of object "
                            "%llx/%llx (size=%u access=%llu+%llu)",
                            (longlong_t)dn->dn_objset->
                            os_dsl_dataset->ds_object,
                            (longlong_t)dn->dn_object, dn->dn_datablksz,
                            (longlong_t)offset, (longlong_t)length);
                        rw_exit(&dn->dn_struct_rwlock);
                        return (SET_ERROR(EIO));
                }
                nblks = 1;
        }
        dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

        zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
        blkid = dbuf_whichblock(dn, 0, offset);
        for (i = 0; i < nblks; i++) {
                dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
                if (db == NULL) {
                        rw_exit(&dn->dn_struct_rwlock);
                        dmu_buf_rele_array(dbp, nblks, tag);
                        zio_nowait(zio);
                        return (SET_ERROR(EIO));
                }

                /* initiate async i/o */
                if (read)
                        (void) dbuf_read(db, zio, dbuf_flags);
                dbp[i] = &db->db;
        }

        if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
            DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
                dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
                    read && DNODE_IS_CACHEABLE(dn), B_TRUE);
        }
        rw_exit(&dn->dn_struct_rwlock);

        /* wait for async i/o */
        err = zio_wait(zio);
        if (err) {
                dmu_buf_rele_array(dbp, nblks, tag);
                return (err);
        }

        /* wait for other io to complete */
        if (read) {
                for (i = 0; i < nblks; i++) {
                        dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
                        mutex_enter(&db->db_mtx);
                        while (db->db_state == DB_READ ||
                            db->db_state == DB_FILL)
                                cv_wait(&db->db_changed, &db->db_mtx);
                        if (db->db_state == DB_UNCACHED)
                                err = SET_ERROR(EIO);
                        mutex_exit(&db->db_mtx);
                        if (err) {
                                dmu_buf_rele_array(dbp, nblks, tag);
                                return (err);
                        }
                }
        }

        *numbufsp = nblks;
        *dbpp = dbp;
        return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);

        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp, DMU_READ_PREFETCH);

        dnode_rele(dn, FTAG);

        return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        int err;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp, DMU_READ_PREFETCH);
        DB_DNODE_EXIT(db);

        return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
        int i;
        dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

        if (numbufs == 0)
                return;

        for (i = 0; i < numbufs; i++) {
                if (dbp[i])
                        dbuf_rele(dbp[i], tag);
        }

        kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
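
/*
 * Example (illustrative only): the hold-array interfaces above are the
 * building blocks of dmu_read()/dmu_write() below -- hold every dbuf
 * overlapping [offset, offset + length), copy in or out, then drop all
 * the holds with a single call:
 *
 *      dmu_buf_t **dbp;
 *      int numbufs;
 *      if (dmu_buf_hold_array_by_bonus(bonus, offset, length, B_TRUE,
 *          FTAG, &numbufs, &dbp) == 0) {
 *              for (int i = 0; i < numbufs; i++)
 *                      ... read dbp[i]->db_data ...
 *              dmu_buf_rele_array(dbp, numbufs, FTAG);
 *      }
 */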

/*
 * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
        dnode_t *dn;
        uint64_t blkid;
        int nblks, err;

        if (len == 0) {         /* they're interested in the bonus buffer */
                dn = DMU_META_DNODE(os);

                if (object == 0 || object >= DN_MAX_OBJECT)
                        return;

                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                blkid = dbuf_whichblock(dn, level,
                    object * sizeof (dnode_phys_t));
                dbuf_prefetch(dn, level, blkid, pri, 0);
                rw_exit(&dn->dn_struct_rwlock);
                return;
        }

        /*
         * See comment before the definition of dmu_prefetch_max.
         */
        len = MIN(len, dmu_prefetch_max);

        /*
         * XXX - Note, if the dnode for the requested object is not
         * already cached, we will do a *synchronous* read in the
         * dnode_hold() call.  The same is true for any indirects.
         */
        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0)
                return;

        /*
         * offset + len - 1 is the last byte we want to prefetch for, and
         * offset is the first.  Then dbuf_whichblock(dn, level, offset + len
         * - 1) is the last block we want to prefetch, and dbuf_whichblock(dn,
         * level, offset) is the first.  Then the number we need to prefetch
         * is last - first + 1.
         */
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (level > 0 || dn->dn_datablkshift != 0) {
                nblks = dbuf_whichblock(dn, level, offset + len - 1) -
                    dbuf_whichblock(dn, level, offset) + 1;
        } else {
                nblks = (offset < dn->dn_datablksz);
        }

        if (nblks != 0) {
                blkid = dbuf_whichblock(dn, level, offset);
                for (int i = 0; i < nblks; i++)
                        dbuf_prefetch(dn, level, blkid + i, pri, 0);
        }
        rw_exit(&dn->dn_struct_rwlock);

        dnode_rele(dn, FTAG);
}
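
/*
 * Example (illustrative): to warm the cache ahead of a sequential read of
 * the first 4MB of an object's level-0 data blocks:
 *
 *      dmu_prefetch(os, object, 0, 0, 4 << 20, ZIO_PRIORITY_ASYNC_READ);
 *
 * Note that len is clamped to dmu_prefetch_max per call, so larger
 * regions should be prefetched piecewise.
 */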

/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
        uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
        /* bytes of data covered by a level-1 indirect block */
        uint64_t iblkrange =
            dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

        ASSERT3U(minimum, <=, *start);

        if (*start - minimum <= iblkrange * maxblks) {
                *start = minimum;
                return (0);
        }
        ASSERT(ISP2(iblkrange));

        for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
                int err;

                /*
                 * dnode_next_offset(BACKWARDS) will find an allocated L1
                 * indirect block at or before the input offset.  We must
                 * decrement *start so that it is at the end of the region
                 * to search.
                 */
                (*start)--;
                err = dnode_next_offset(dn,
                    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

                /* if there are no indirect blocks before start, we are done */
                if (err == ESRCH) {
                        *start = minimum;
                        break;
                } else if (err != 0) {
                        return (err);
                }

                /* set start to the beginning of this L1 indirect */
                *start = P2ALIGN(*start, iblkrange);
        }
        if (*start < minimum)
                *start = minimum;
        return (0);
}
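
/*
 * Worked example (illustrative): with 128K data blocks and 128K indirect
 * blocks (dn_indblkshift == 17, so EPB(17, SPA_BLKPTRSHIFT) == 1024
 * block pointers per indirect), iblkrange is 128K * 1024 == 128M; each
 * backwards step of the loop above therefore skips over one such 128M
 * level-1 region.
 */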

/*
 * If this objset is of type OST_ZFS, return true if vfs's unmounted flag
 * is set; otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
        if (dmu_objset_type(os) == DMU_OST_ZFS)
                return (zfs_get_vfs_flag_unmounted(os));
#endif
        return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
        uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
        int err;
        uint64_t dirty_frees_threshold;
        dsl_pool_t *dp = dmu_objset_pool(os);

        if (offset >= object_size)
                return (0);

        if (zfs_per_txg_dirty_frees_percent <= 100)
                dirty_frees_threshold =
                    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
        else
                dirty_frees_threshold = zfs_dirty_data_max / 4;

        if (length == DMU_OBJECT_END || offset + length > object_size)
                length = object_size - offset;

        while (length != 0) {
                uint64_t chunk_end, chunk_begin, chunk_len;
                uint64_t long_free_dirty_all_txgs = 0;
                dmu_tx_t *tx;

                if (dmu_objset_zfs_unmounting(dn->dn_objset))
                        return (SET_ERROR(EINTR));

                chunk_end = chunk_begin = offset + length;

                /* move chunk_begin backwards to the beginning of this chunk */
                err = get_next_chunk(dn, &chunk_begin, offset);
                if (err)
                        return (err);
                ASSERT3U(chunk_begin, >=, offset);
                ASSERT3U(chunk_begin, <=, chunk_end);

                chunk_len = chunk_end - chunk_begin;

                mutex_enter(&dp->dp_lock);
                for (int t = 0; t < TXG_SIZE; t++) {
                        long_free_dirty_all_txgs +=
                            dp->dp_long_free_dirty_pertxg[t];
                }
                mutex_exit(&dp->dp_lock);

                /*
                 * To avoid filling up a TXG with just frees, wait for
                 * the next TXG to open before freeing more chunks if
                 * we have reached the threshold of frees.
                 */
                if (dirty_frees_threshold != 0 &&
                    long_free_dirty_all_txgs >= dirty_frees_threshold) {
                        txg_wait_open(dp, 0, B_TRUE);
                        continue;
                }

                tx = dmu_tx_create(os);
                dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

                /*
                 * Mark this transaction as typically resulting in a net
                 * reduction in space used.
                 */
                dmu_tx_mark_netfree(tx);
                err = dmu_tx_assign(tx, TXG_WAIT);
                if (err) {
                        dmu_tx_abort(tx);
                        return (err);
                }

                mutex_enter(&dp->dp_lock);
                dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
                    chunk_len;
                mutex_exit(&dp->dp_lock);
                DTRACE_PROBE3(free__long__range,
                    uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
                    uint64_t, dmu_tx_get_txg(tx));
                dnode_free_range(dn, chunk_begin, chunk_len, tx);

                dmu_tx_commit(tx);

                length -= chunk_len;
        }
        return (0);
}
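
/*
 * Worked example (illustrative): with the default
 * zfs_per_txg_dirty_frees_percent of 30 and, say, zfs_dirty_data_max of
 * 4GB, dirty_frees_threshold is 1.2GB; once that much free-dirtied data
 * is outstanding across all TXGs, the loop above waits for the next TXG
 * to open before dirtying further chunks.
 */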

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0)
                return (err);
        err = dmu_free_long_range_impl(os, dn, offset, length);

        /*
         * It is important to zero out the maxblkid when freeing the entire
         * file, so that (a) subsequent calls to dmu_free_long_range_impl()
         * will take the fast path, and (b) dnode_reallocate() can verify
         * that the entire file has been freed.
         */
        if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
                dn->dn_maxblkid = 0;

        dnode_rele(dn, FTAG);
        return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
        dmu_tx_t *tx;
        int err;

        err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
        if (err != 0)
                return (err);

        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, object);
        dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
        dmu_tx_mark_netfree(tx);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err == 0) {
                err = dmu_object_free(os, object, tx);
                dmu_tx_commit(tx);
        } else {
                dmu_tx_abort(tx);
        }

        return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
        dnode_t *dn;
        int err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);
        ASSERT(offset < UINT64_MAX);
        ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
        dnode_free_range(dn, offset, size, tx);
        dnode_rele(dn, FTAG);
        return (0);
}

static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
        dmu_buf_t **dbp;
        int numbufs, err = 0;

        /*
         * Deal with odd block sizes, where there can't be data past the first
         * block.  If we ever do the tail block optimization, we will need to
         * handle that here as well.
         */
        if (dn->dn_maxblkid == 0) {
                int newsz = offset > dn->dn_datablksz ? 0 :
                    MIN(size, dn->dn_datablksz - offset);
                bzero((char *)buf + newsz, size - newsz);
                size = newsz;
        }

        while (size > 0) {
                uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
                int i;

                /*
                 * NB: we could do this block-at-a-time, but it's nice
                 * to be reading in parallel.
                 */
                err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
                    TRUE, FTAG, &numbufs, &dbp, flags);
                if (err)
                        break;

                for (i = 0; i < numbufs; i++) {
                        int tocpy;
                        int bufoff;
                        dmu_buf_t *db = dbp[i];

                        ASSERT(size > 0);

                        bufoff = offset - db->db_offset;
                        tocpy = (int)MIN(db->db_size - bufoff, size);

                        bcopy((char *)db->db_data + bufoff, buf, tocpy);

                        offset += tocpy;
                        size -= tocpy;
                        buf = (char *)buf + tocpy;
                }
                dmu_buf_rele_array(dbp, numbufs, FTAG);
        }
        return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0)
                return (err);

        err = dmu_read_impl(dn, offset, size, buf, flags);
        dnode_rele(dn, FTAG);
        return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
        return (dmu_read_impl(dn, offset, size, buf, flags));
}

static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
        int i;

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = offset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                bcopy(buf, (char *)db->db_data + bufoff, tocpy);

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                offset += tocpy;
                size -= tocpy;
                buf = (char *)buf + tocpy;
        }
}
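
/*
 * Write "size" bytes from "buf" into the object at "offset"; the write
 * must be covered by the caller's transaction.  Whole-block writes take
 * the fill path, while partial blocks are read-modify-written via
 * dmu_buf_will_dirty().
 */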
void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs;

        if (size == 0)
                return;

        VERIFY0(dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp));
        dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
        dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs;

        if (size == 0)
                return;

        VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
            FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
        dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
        dmu_buf_rele_array(dbp, numbufs, FTAG);
}
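
/*
 * Consider one level-1 indirect block for remapping: if it was last
 * written before the most recent device removal and can be remapped,
 * dirty it in its own transaction so that it is rewritten (with its
 * block pointers remapped off the removed vdev) at sync time.
 */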
static int
dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
    uint64_t last_removal_txg, uint64_t offset)
{
        uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
        int err = 0;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
        ASSERT3P(dbuf, !=, NULL);

        /*
         * If the block hasn't been written yet, this default will ensure
         * we don't try to remap it.
         */
        uint64_t birth = UINT64_MAX;
        ASSERT3U(last_removal_txg, !=, UINT64_MAX);
        if (dbuf->db_blkptr != NULL)
                birth = dbuf->db_blkptr->blk_birth;
        rw_exit(&dn->dn_struct_rwlock);

        /*
         * If this L1 was already written after the last removal, then we've
         * already tried to remap it.
         */
        if (birth <= last_removal_txg &&
            dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
            dbuf_can_remap(dbuf)) {
                dmu_tx_t *tx = dmu_tx_create(os);
                dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
                err = dmu_tx_assign(tx, TXG_WAIT);
                if (err == 0) {
                        (void) dbuf_dirty(dbuf, tx);
                        dmu_tx_commit(tx);
                } else {
                        dmu_tx_abort(tx);
                }
        }

        dbuf_rele(dbuf, FTAG);

        delay(zfs_object_remap_one_indirect_delay_ticks);

        return (err);
}

/*
 * Remap all blockpointers in the object, if possible, so that they reference
 * only concrete vdevs.
 *
 * To do this, iterate over the L0 blockpointers and remap any that reference
 * an indirect vdev.  Note that we only examine L0 blockpointers; since we
 * cannot guarantee that we can remap all blockpointers anyway (due to split
 * blocks), we do not want to make the code unnecessarily complicated to
 * catch the unlikely case that there is an L1 block on an indirect vdev that
 * contains no indirect blockpointers.
 */
int
dmu_object_remap_indirects(objset_t *os, uint64_t object,
    uint64_t last_removal_txg)
{
        uint64_t offset, l1span;
        int err;
        dnode_t *dn;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0) {
                return (err);
        }

        if (dn->dn_nlevels <= 1) {
                if (issig(JUSTLOOKING) && issig(FORREAL)) {
                        err = SET_ERROR(EINTR);
                }

                /*
                 * If the dnode has no indirect blocks, we cannot dirty them.
                 * We still want to remap the blkptr(s) in the dnode if
                 * appropriate, so mark it as dirty.
                 */
12125cabbc6bSPrashanth Sreenivasa */ 12135cabbc6bSPrashanth Sreenivasa if (err == 0 && dnode_needs_remap(dn)) { 12145cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = dmu_tx_create(os); 12155cabbc6bSPrashanth Sreenivasa dmu_tx_hold_bonus(tx, dn->dn_object); 12165cabbc6bSPrashanth Sreenivasa if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) { 12175cabbc6bSPrashanth Sreenivasa dnode_setdirty(dn, tx); 12185cabbc6bSPrashanth Sreenivasa dmu_tx_commit(tx); 12195cabbc6bSPrashanth Sreenivasa } else { 12205cabbc6bSPrashanth Sreenivasa dmu_tx_abort(tx); 12215cabbc6bSPrashanth Sreenivasa } 12225cabbc6bSPrashanth Sreenivasa } 12235cabbc6bSPrashanth Sreenivasa 12245cabbc6bSPrashanth Sreenivasa dnode_rele(dn, FTAG); 12255cabbc6bSPrashanth Sreenivasa return (err); 12265cabbc6bSPrashanth Sreenivasa } 12275cabbc6bSPrashanth Sreenivasa 12285cabbc6bSPrashanth Sreenivasa offset = 0; 12295cabbc6bSPrashanth Sreenivasa l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT + 12305cabbc6bSPrashanth Sreenivasa dn->dn_datablkshift); 12315cabbc6bSPrashanth Sreenivasa /* 12325cabbc6bSPrashanth Sreenivasa * Find the next L1 indirect that is not a hole. 12335cabbc6bSPrashanth Sreenivasa */ 12345cabbc6bSPrashanth Sreenivasa while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) { 12355cabbc6bSPrashanth Sreenivasa if (issig(JUSTLOOKING) && issig(FORREAL)) { 12365cabbc6bSPrashanth Sreenivasa err = SET_ERROR(EINTR); 12375cabbc6bSPrashanth Sreenivasa break; 12385cabbc6bSPrashanth Sreenivasa } 12395cabbc6bSPrashanth Sreenivasa if ((err = dmu_object_remap_one_indirect(os, dn, 12405cabbc6bSPrashanth Sreenivasa last_removal_txg, offset)) != 0) { 12415cabbc6bSPrashanth Sreenivasa break; 12425cabbc6bSPrashanth Sreenivasa } 12435cabbc6bSPrashanth Sreenivasa offset += l1span; 12445cabbc6bSPrashanth Sreenivasa } 12455cabbc6bSPrashanth Sreenivasa 12465cabbc6bSPrashanth Sreenivasa dnode_rele(dn, FTAG); 12475cabbc6bSPrashanth Sreenivasa return (err); 12485cabbc6bSPrashanth Sreenivasa } 12495cabbc6bSPrashanth Sreenivasa 125082c9918fSTim Haley void 125182c9918fSTim Haley dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, 125282c9918fSTim Haley dmu_tx_t *tx) 125382c9918fSTim Haley { 125482c9918fSTim Haley dmu_buf_t **dbp; 125582c9918fSTim Haley int numbufs, i; 125682c9918fSTim Haley 125782c9918fSTim Haley if (size == 0) 125882c9918fSTim Haley return; 125982c9918fSTim Haley 126082c9918fSTim Haley VERIFY(0 == dmu_buf_hold_array(os, object, offset, size, 126182c9918fSTim Haley FALSE, FTAG, &numbufs, &dbp)); 126282c9918fSTim Haley 126382c9918fSTim Haley for (i = 0; i < numbufs; i++) { 126482c9918fSTim Haley dmu_buf_t *db = dbp[i]; 126582c9918fSTim Haley 126682c9918fSTim Haley dmu_buf_will_not_fill(db, tx); 126782c9918fSTim Haley } 126882c9918fSTim Haley dmu_buf_rele_array(dbp, numbufs, FTAG); 126982c9918fSTim Haley } 127082c9918fSTim Haley 12715d7b4d43SMatthew Ahrens void 12725d7b4d43SMatthew Ahrens dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset, 12735d7b4d43SMatthew Ahrens void *data, uint8_t etype, uint8_t comp, int uncompressed_size, 12745d7b4d43SMatthew Ahrens int compressed_size, int byteorder, dmu_tx_t *tx) 12755d7b4d43SMatthew Ahrens { 12765d7b4d43SMatthew Ahrens dmu_buf_t *db; 12775d7b4d43SMatthew Ahrens 12785d7b4d43SMatthew Ahrens ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES); 12795d7b4d43SMatthew Ahrens ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS); 12805d7b4d43SMatthew Ahrens VERIFY0(dmu_buf_hold_noread(os, object, offset, 12815d7b4d43SMatthew Ahrens FTAG, &db)); 12825d7b4d43SMatthew Ahrens 
12835d7b4d43SMatthew Ahrens dmu_buf_write_embedded(db, 12845d7b4d43SMatthew Ahrens data, (bp_embedded_type_t)etype, (enum zio_compress)comp, 12855d7b4d43SMatthew Ahrens uncompressed_size, compressed_size, byteorder, tx); 12865d7b4d43SMatthew Ahrens 12875d7b4d43SMatthew Ahrens dmu_buf_rele(db, FTAG); 12885d7b4d43SMatthew Ahrens } 12895d7b4d43SMatthew Ahrens 1290c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /* 1291c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * DMU support for xuio 1292c242f9a0Schunli zhang - Sun Microsystems - Irvine United States */ 1293c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_t *xuio_ksp = NULL; 1294c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1295c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1296c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_init(xuio_t *xuio, int nblk) 1297c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1298c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv; 1299c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio_t *uio = &xuio->xu_uio; 1300c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1301c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_iovcnt = nblk; 1302c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP); 1303c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1304c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP); 1305c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->cnt = nblk; 1306c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP); 1307c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->iovp = uio->uio_iov; 1308c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIO_XUZC_PRIV(xuio) = priv; 1309c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1310c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (XUIO_XUZC_RW(xuio) == UIO_READ) 1311c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk); 1312c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1313c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk); 1314c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1315c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (0); 1316c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1317c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1318c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 1319c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_fini(xuio_t *xuio) 1320c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1321c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1322c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int nblk = priv->cnt; 1323c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1324c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kmem_free(priv->iovp, nblk * sizeof (iovec_t)); 1325c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kmem_free(priv->bufs, nblk * sizeof 
(arc_buf_t *)); 1326c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kmem_free(priv, sizeof (dmu_xuio_t)); 1327c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1328c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (XUIO_XUZC_RW(xuio) == UIO_READ) 1329c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk); 1330c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1331c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk); 1332c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1333c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1334c242f9a0Schunli zhang - Sun Microsystems - Irvine United States /* 1335c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf } 1336c242f9a0Schunli zhang - Sun Microsystems - Irvine United States * and increase priv->next by 1. 1337c242f9a0Schunli zhang - Sun Microsystems - Irvine United States */ 1338c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1339c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n) 1340c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1341c242f9a0Schunli zhang - Sun Microsystems - Irvine United States struct iovec *iov; 1342c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio_t *uio = &xuio->xu_uio; 1343c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1344c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int i = priv->next++; 1345c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1346c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 13475602294fSDan Kimmel ASSERT(off + n <= arc_buf_lsize(abuf)); 1348c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov = uio->uio_iov + i; 1349c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov->iov_base = (char *)abuf->b_data + off; 1350c242f9a0Schunli zhang - Sun Microsystems - Irvine United States iov->iov_len = n; 1351c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs[i] = abuf; 1352c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (0); 1353c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1354c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1355c242f9a0Schunli zhang - Sun Microsystems - Irvine United States int 1356c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_cnt(xuio_t *xuio) 1357c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1358c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1359c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (priv->cnt); 1360c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1361c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1362c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t * 1363c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_arcbuf(xuio_t *xuio, int i) 1364c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1365c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1366c242f9a0Schunli 
zhang - Sun Microsystems - Irvine United States 1367c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 1368c242f9a0Schunli zhang - Sun Microsystems - Irvine United States return (priv->bufs[i]); 1369c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1370c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1371c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 1372c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_clear(xuio_t *xuio, int i) 1373c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1374c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio); 1375c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1376c242f9a0Schunli zhang - Sun Microsystems - Irvine United States ASSERT(i < priv->cnt); 1377c242f9a0Schunli zhang - Sun Microsystems - Irvine United States priv->bufs[i] = NULL; 1378c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1379c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1380c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void 1381c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_init(void) 1382c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1383c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc", 1384c242f9a0Schunli zhang - Sun Microsystems - Irvine United States KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t), 1385c242f9a0Schunli zhang - Sun Microsystems - Irvine United States KSTAT_FLAG_VIRTUAL); 1386c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio_ksp != NULL) { 1387c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp->ks_data = &xuio_stats; 1388c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_install(xuio_ksp); 1389c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1390c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1391c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1392c242f9a0Schunli zhang - Sun Microsystems - Irvine United States static void 1393c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(void) 1394c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1395c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio_ksp != NULL) { 1396c242f9a0Schunli zhang - Sun Microsystems - Irvine United States kstat_delete(xuio_ksp); 1397c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_ksp = NULL; 1398c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1399c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1400c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1401c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 140299aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_copied(void) 1403c242f9a0Schunli zhang - Sun Microsystems - Irvine United States { 1404c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_copied); 1405c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1406c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1407c242f9a0Schunli zhang - Sun Microsystems - Irvine United States void 140899aa8b55SPrashanth Sreenivasa xuio_stat_wbuf_nocopy(void) 1409c242f9a0Schunli zhang - Sun Microsystems - Irvine United 
States { 1410c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_nocopy); 1411c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1412c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1413fa9e4066Sahrens #ifdef _KERNEL 14148dfe5547SRichard Yao int 1415f8554bb9SMatthew Ahrens dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size) 1416feb08c6bSbillm { 1417feb08c6bSbillm dmu_buf_t **dbp; 1418feb08c6bSbillm int numbufs, i, err; 1419c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_t *xuio = NULL; 1420feb08c6bSbillm 1421feb08c6bSbillm /* 1422feb08c6bSbillm * NB: we could do this block-at-a-time, but it's nice 1423feb08c6bSbillm * to be reading in parallel. 1424feb08c6bSbillm */ 1425f8554bb9SMatthew Ahrens err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, 1426f8554bb9SMatthew Ahrens TRUE, FTAG, &numbufs, &dbp, 0); 1427feb08c6bSbillm if (err) 1428feb08c6bSbillm return (err); 1429feb08c6bSbillm 1430c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (uio->uio_extflg == UIO_XUIO) 1431c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio = (xuio_t *)uio; 1432c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1433feb08c6bSbillm for (i = 0; i < numbufs; i++) { 1434feb08c6bSbillm int tocpy; 1435feb08c6bSbillm int bufoff; 1436feb08c6bSbillm dmu_buf_t *db = dbp[i]; 1437feb08c6bSbillm 1438feb08c6bSbillm ASSERT(size > 0); 1439feb08c6bSbillm 1440feb08c6bSbillm bufoff = uio->uio_loffset - db->db_offset; 1441feb08c6bSbillm tocpy = (int)MIN(db->db_size - bufoff, size); 1442feb08c6bSbillm 1443c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (xuio) { 1444c242f9a0Schunli zhang - Sun Microsystems - Irvine United States dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 1445c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *dbuf_abuf = dbi->db_buf; 1446c242f9a0Schunli zhang - Sun Microsystems - Irvine United States arc_buf_t *abuf = dbuf_loan_arcbuf(dbi); 1447c242f9a0Schunli zhang - Sun Microsystems - Irvine United States err = dmu_xuio_add(xuio, abuf, bufoff, tocpy); 1448c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (!err) { 1449c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_resid -= tocpy; 1450c242f9a0Schunli zhang - Sun Microsystems - Irvine United States uio->uio_loffset += tocpy; 1451c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1452c242f9a0Schunli zhang - Sun Microsystems - Irvine United States 1453c242f9a0Schunli zhang - Sun Microsystems - Irvine United States if (abuf == dbuf_abuf) 1454c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_rbuf_nocopy); 1455c242f9a0Schunli zhang - Sun Microsystems - Irvine United States else 1456c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_rbuf_copied); 1457c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } else { 1458c242f9a0Schunli zhang - Sun Microsystems - Irvine United States err = uiomove((char *)db->db_data + bufoff, tocpy, 1459c242f9a0Schunli zhang - Sun Microsystems - Irvine United States UIO_READ, uio); 1460c242f9a0Schunli zhang - Sun Microsystems - Irvine United States } 1461feb08c6bSbillm if (err) 1462feb08c6bSbillm break; 1463feb08c6bSbillm 1464feb08c6bSbillm size -= tocpy; 1465feb08c6bSbillm } 1466feb08c6bSbillm dmu_buf_rele_array(dbp, numbufs, FTAG); 1467feb08c6bSbillm 1468feb08c6bSbillm return (err); 1469feb08c6bSbillm } 
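/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical in-kernel caller of dmu_read_uio() below, showing how
 * a uio_t is built over a single kernel buffer.  Real consumers
 * (e.g. zfs_read()) receive their uio from the VFS layer instead.
 */
#ifdef DMU_USAGE_EXAMPLE	/* hypothetical guard; never defined */
static int
example_dmu_read_uio(objset_t *os, uint64_t object, uint64_t off,
    void *kbuf, uint64_t len)
{
	iovec_t iov;
	uio_t uio;

	bzero(&iov, sizeof (iov));
	bzero(&uio, sizeof (uio));

	iov.iov_base = kbuf;
	iov.iov_len = len;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = off;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;

	/* On success, uio_loffset advances and uio_resid drops by 'len'. */
	return (dmu_read_uio(os, object, &uio, len));
}
#endif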
1470feb08c6bSbillm 1471f8554bb9SMatthew Ahrens /* 1472f8554bb9SMatthew Ahrens * Read 'size' bytes into the uio buffer. 1473f8554bb9SMatthew Ahrens * From object zdb->db_object. 1474f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1475f8554bb9SMatthew Ahrens * 1476f8554bb9SMatthew Ahrens * If the caller already has a dbuf in the target object 1477f8554bb9SMatthew Ahrens * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(), 1478f8554bb9SMatthew Ahrens * because we don't have to find the dnode_t for the object. 1479f8554bb9SMatthew Ahrens */ 1480f8554bb9SMatthew Ahrens int 1481f8554bb9SMatthew Ahrens dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size) 1482f8554bb9SMatthew Ahrens { 1483f8554bb9SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1484f8554bb9SMatthew Ahrens dnode_t *dn; 1485f8554bb9SMatthew Ahrens int err; 1486f8554bb9SMatthew Ahrens 1487f8554bb9SMatthew Ahrens if (size == 0) 1488f8554bb9SMatthew Ahrens return (0); 1489f8554bb9SMatthew Ahrens 1490f8554bb9SMatthew Ahrens DB_DNODE_ENTER(db); 1491f8554bb9SMatthew Ahrens dn = DB_DNODE(db); 1492f8554bb9SMatthew Ahrens err = dmu_read_uio_dnode(dn, uio, size); 1493f8554bb9SMatthew Ahrens DB_DNODE_EXIT(db); 1494f8554bb9SMatthew Ahrens 1495f8554bb9SMatthew Ahrens return (err); 1496f8554bb9SMatthew Ahrens } 1497f8554bb9SMatthew Ahrens 1498f8554bb9SMatthew Ahrens /* 1499f8554bb9SMatthew Ahrens * Read 'size' bytes into the uio buffer. 1500f8554bb9SMatthew Ahrens * From the specified object 1501f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1502f8554bb9SMatthew Ahrens */ 1503f8554bb9SMatthew Ahrens int 1504f8554bb9SMatthew Ahrens dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) 1505f8554bb9SMatthew Ahrens { 1506f8554bb9SMatthew Ahrens dnode_t *dn; 1507f8554bb9SMatthew Ahrens int err; 1508f8554bb9SMatthew Ahrens 1509f8554bb9SMatthew Ahrens if (size == 0) 1510f8554bb9SMatthew Ahrens return (0); 1511f8554bb9SMatthew Ahrens 1512f8554bb9SMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 1513f8554bb9SMatthew Ahrens if (err) 1514f8554bb9SMatthew Ahrens return (err); 1515f8554bb9SMatthew Ahrens 1516f8554bb9SMatthew Ahrens err = dmu_read_uio_dnode(dn, uio, size); 1517f8554bb9SMatthew Ahrens 1518f8554bb9SMatthew Ahrens dnode_rele(dn, FTAG); 1519f8554bb9SMatthew Ahrens 1520f8554bb9SMatthew Ahrens return (err); 1521f8554bb9SMatthew Ahrens } 1522f8554bb9SMatthew Ahrens 15238dfe5547SRichard Yao int 152494d1a210STim Haley dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx) 1525fa9e4066Sahrens { 1526fa9e4066Sahrens dmu_buf_t **dbp; 152794d1a210STim Haley int numbufs; 1528fa9e4066Sahrens int err = 0; 152994d1a210STim Haley int i; 1530fa9e4066Sahrens 153194d1a210STim Haley err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size, 153294d1a210STim Haley FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH); 1533ea8dc4b6Seschrock if (err) 1534ea8dc4b6Seschrock return (err); 1535fa9e4066Sahrens 1536fa9e4066Sahrens for (i = 0; i < numbufs; i++) { 1537fa9e4066Sahrens int tocpy; 1538fa9e4066Sahrens int bufoff; 1539fa9e4066Sahrens dmu_buf_t *db = dbp[i]; 1540fa9e4066Sahrens 1541fa9e4066Sahrens ASSERT(size > 0); 1542fa9e4066Sahrens 1543feb08c6bSbillm bufoff = uio->uio_loffset - db->db_offset; 1544fa9e4066Sahrens tocpy = (int)MIN(db->db_size - bufoff, size); 1545fa9e4066Sahrens 1546fa9e4066Sahrens ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); 1547fa9e4066Sahrens 1548fa9e4066Sahrens if (tocpy == db->db_size) 1549fa9e4066Sahrens 
dmu_buf_will_fill(db, tx); 1550fa9e4066Sahrens else 1551fa9e4066Sahrens dmu_buf_will_dirty(db, tx); 1552fa9e4066Sahrens 1553fa9e4066Sahrens /* 1554fa9e4066Sahrens * XXX uiomove could block forever (eg. nfs-backed 1555fa9e4066Sahrens * pages). There needs to be a uiolockdown() function 1556fa9e4066Sahrens * to lock the pages in memory, so that uiomove won't 1557fa9e4066Sahrens * block. 1558fa9e4066Sahrens */ 1559fa9e4066Sahrens err = uiomove((char *)db->db_data + bufoff, tocpy, 1560fa9e4066Sahrens UIO_WRITE, uio); 1561fa9e4066Sahrens 1562fa9e4066Sahrens if (tocpy == db->db_size) 1563fa9e4066Sahrens dmu_buf_fill_done(db, tx); 1564fa9e4066Sahrens 1565fa9e4066Sahrens if (err) 1566fa9e4066Sahrens break; 1567fa9e4066Sahrens 1568fa9e4066Sahrens size -= tocpy; 1569fa9e4066Sahrens } 157094d1a210STim Haley 1571ea8dc4b6Seschrock dmu_buf_rele_array(dbp, numbufs, FTAG); 1572fa9e4066Sahrens return (err); 1573fa9e4066Sahrens } 157444eda4d7Smaybee 1575f8554bb9SMatthew Ahrens /* 1576f8554bb9SMatthew Ahrens * Write 'size' bytes from the uio buffer. 1577f8554bb9SMatthew Ahrens * To object zdb->db_object. 1578f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 1579f8554bb9SMatthew Ahrens * 1580f8554bb9SMatthew Ahrens * If the caller already has a dbuf in the target object 1581f8554bb9SMatthew Ahrens * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(), 1582f8554bb9SMatthew Ahrens * because we don't have to find the dnode_t for the object. 1583f8554bb9SMatthew Ahrens */ 158494d1a210STim Haley int 158594d1a210STim Haley dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size, 158694d1a210STim Haley dmu_tx_t *tx) 158794d1a210STim Haley { 1588744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1589744947dcSTom Erickson dnode_t *dn; 1590744947dcSTom Erickson int err; 1591744947dcSTom Erickson 159294d1a210STim Haley if (size == 0) 159394d1a210STim Haley return (0); 159494d1a210STim Haley 1595744947dcSTom Erickson DB_DNODE_ENTER(db); 1596744947dcSTom Erickson dn = DB_DNODE(db); 1597744947dcSTom Erickson err = dmu_write_uio_dnode(dn, uio, size, tx); 1598744947dcSTom Erickson DB_DNODE_EXIT(db); 1599744947dcSTom Erickson 1600744947dcSTom Erickson return (err); 160194d1a210STim Haley } 160294d1a210STim Haley 1603f8554bb9SMatthew Ahrens /* 1604f8554bb9SMatthew Ahrens * Write 'size' bytes from the uio buffer. 1605f8554bb9SMatthew Ahrens * To the specified object. 1606f8554bb9SMatthew Ahrens * Starting at offset uio->uio_loffset. 
1607f8554bb9SMatthew Ahrens */ 160894d1a210STim Haley int 160994d1a210STim Haley dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, 161094d1a210STim Haley dmu_tx_t *tx) 161194d1a210STim Haley { 161294d1a210STim Haley dnode_t *dn; 161394d1a210STim Haley int err; 161494d1a210STim Haley 161594d1a210STim Haley if (size == 0) 161694d1a210STim Haley return (0); 161794d1a210STim Haley 161894d1a210STim Haley err = dnode_hold(os, object, FTAG, &dn); 161994d1a210STim Haley if (err) 162094d1a210STim Haley return (err); 162194d1a210STim Haley 162294d1a210STim Haley err = dmu_write_uio_dnode(dn, uio, size, tx); 162394d1a210STim Haley 162494d1a210STim Haley dnode_rele(dn, FTAG); 162594d1a210STim Haley 162694d1a210STim Haley return (err); 162794d1a210STim Haley } 162894d1a210STim Haley 162944eda4d7Smaybee int 163044eda4d7Smaybee dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, 163144eda4d7Smaybee page_t *pp, dmu_tx_t *tx) 163244eda4d7Smaybee { 163344eda4d7Smaybee dmu_buf_t **dbp; 163444eda4d7Smaybee int numbufs, i; 163544eda4d7Smaybee int err; 163644eda4d7Smaybee 163744eda4d7Smaybee if (size == 0) 163844eda4d7Smaybee return (0); 163944eda4d7Smaybee 164044eda4d7Smaybee err = dmu_buf_hold_array(os, object, offset, size, 164144eda4d7Smaybee FALSE, FTAG, &numbufs, &dbp); 164244eda4d7Smaybee if (err) 164344eda4d7Smaybee return (err); 164444eda4d7Smaybee 164544eda4d7Smaybee for (i = 0; i < numbufs; i++) { 164644eda4d7Smaybee int tocpy, copied, thiscpy; 164744eda4d7Smaybee int bufoff; 164844eda4d7Smaybee dmu_buf_t *db = dbp[i]; 164944eda4d7Smaybee caddr_t va; 165044eda4d7Smaybee 165144eda4d7Smaybee ASSERT(size > 0); 165244eda4d7Smaybee ASSERT3U(db->db_size, >=, PAGESIZE); 165344eda4d7Smaybee 165444eda4d7Smaybee bufoff = offset - db->db_offset; 165544eda4d7Smaybee tocpy = (int)MIN(db->db_size - bufoff, size); 165644eda4d7Smaybee 165744eda4d7Smaybee ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); 165844eda4d7Smaybee 165944eda4d7Smaybee if (tocpy == db->db_size) 166044eda4d7Smaybee dmu_buf_will_fill(db, tx); 166144eda4d7Smaybee else 166244eda4d7Smaybee dmu_buf_will_dirty(db, tx); 166344eda4d7Smaybee 166444eda4d7Smaybee for (copied = 0; copied < tocpy; copied += PAGESIZE) { 166544eda4d7Smaybee ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff); 166644eda4d7Smaybee thiscpy = MIN(PAGESIZE, tocpy - copied); 16670fab61baSJonathan W Adams va = zfs_map_page(pp, S_READ); 166844eda4d7Smaybee bcopy(va, (char *)db->db_data + bufoff, thiscpy); 16690fab61baSJonathan W Adams zfs_unmap_page(pp, va); 167044eda4d7Smaybee pp = pp->p_next; 167144eda4d7Smaybee bufoff += PAGESIZE; 167244eda4d7Smaybee } 167344eda4d7Smaybee 167444eda4d7Smaybee if (tocpy == db->db_size) 167544eda4d7Smaybee dmu_buf_fill_done(db, tx); 167644eda4d7Smaybee 167744eda4d7Smaybee offset += tocpy; 167844eda4d7Smaybee size -= tocpy; 167944eda4d7Smaybee } 168044eda4d7Smaybee dmu_buf_rele_array(dbp, numbufs, FTAG); 168144eda4d7Smaybee return (err); 168244eda4d7Smaybee } 1683fa9e4066Sahrens #endif 1684fa9e4066Sahrens 16852fdbea25SAleksandr Guzovskiy /* 16862fdbea25SAleksandr Guzovskiy * Allocate a loaned anonymous arc buffer. 
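 * Callers repay the loan either by assigning the buffer to a dbuf
 * (dmu_assign_arcbuf_by_dbuf(), below) or by freeing it with
 * dmu_return_arcbuf().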
16872fdbea25SAleksandr Guzovskiy */ 16882fdbea25SAleksandr Guzovskiy arc_buf_t * 16892fdbea25SAleksandr Guzovskiy dmu_request_arcbuf(dmu_buf_t *handle, int size) 16902fdbea25SAleksandr Guzovskiy { 1691744947dcSTom Erickson dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle; 16922fdbea25SAleksandr Guzovskiy 16935602294fSDan Kimmel return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size)); 16942fdbea25SAleksandr Guzovskiy } 16952fdbea25SAleksandr Guzovskiy 16962fdbea25SAleksandr Guzovskiy /* 16972fdbea25SAleksandr Guzovskiy * Free a loaned arc buffer. 16982fdbea25SAleksandr Guzovskiy */ 16992fdbea25SAleksandr Guzovskiy void 17002fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(arc_buf_t *buf) 17012fdbea25SAleksandr Guzovskiy { 17022fdbea25SAleksandr Guzovskiy arc_return_buf(buf, FTAG); 1703dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, FTAG); 17042fdbea25SAleksandr Guzovskiy } 17052fdbea25SAleksandr Guzovskiy 1706eb633035STom Caputi void 1707eb633035STom Caputi dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset, 1708eb633035STom Caputi dmu_buf_t *handle, dmu_tx_t *tx) 1709eb633035STom Caputi { 1710eb633035STom Caputi dmu_buf_t *dst_handle; 1711eb633035STom Caputi dmu_buf_impl_t *dstdb; 1712eb633035STom Caputi dmu_buf_impl_t *srcdb = (dmu_buf_impl_t *)handle; 1713a60ca23dSTom Caputi dmu_object_type_t type; 1714eb633035STom Caputi arc_buf_t *abuf; 1715eb633035STom Caputi uint64_t datalen; 1716eb633035STom Caputi boolean_t byteorder; 1717eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 1718eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 1719eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 1720eb633035STom Caputi 1721eb633035STom Caputi ASSERT3P(srcdb->db_buf, !=, NULL); 1722eb633035STom Caputi 1723eb633035STom Caputi /* hold the db that we want to write to */ 1724eb633035STom Caputi VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &dst_handle, 1725eb633035STom Caputi DMU_READ_NO_DECRYPT)); 1726eb633035STom Caputi dstdb = (dmu_buf_impl_t *)dst_handle; 1727eb633035STom Caputi datalen = arc_buf_size(srcdb->db_buf); 1728eb633035STom Caputi 1729a60ca23dSTom Caputi DB_DNODE_ENTER(dstdb); 1730a60ca23dSTom Caputi type = DB_DNODE(dstdb)->dn_type; 1731a60ca23dSTom Caputi DB_DNODE_EXIT(dstdb); 1732a60ca23dSTom Caputi 1733eb633035STom Caputi /* allocate an arc buffer that matches the type of srcdb->db_buf */ 1734eb633035STom Caputi if (arc_is_encrypted(srcdb->db_buf)) { 1735eb633035STom Caputi arc_get_raw_params(srcdb->db_buf, &byteorder, salt, iv, mac); 1736eb633035STom Caputi abuf = arc_loan_raw_buf(os->os_spa, dmu_objset_id(os), 1737a60ca23dSTom Caputi byteorder, salt, iv, mac, type, 1738eb633035STom Caputi datalen, arc_buf_lsize(srcdb->db_buf), 1739eb633035STom Caputi arc_get_compression(srcdb->db_buf)); 1740eb633035STom Caputi } else { 1741eb633035STom Caputi /* we won't get a compressed db back from dmu_buf_hold() */ 1742eb633035STom Caputi ASSERT3U(arc_get_compression(srcdb->db_buf), 1743eb633035STom Caputi ==, ZIO_COMPRESS_OFF); 1744eb633035STom Caputi abuf = arc_loan_buf(os->os_spa, 1745a60ca23dSTom Caputi DMU_OT_IS_METADATA(type), datalen); 1746eb633035STom Caputi } 1747eb633035STom Caputi 1748eb633035STom Caputi ASSERT3U(datalen, ==, arc_buf_size(abuf)); 1749eb633035STom Caputi 1750eb633035STom Caputi /* copy the data to the new buffer and assign it to the dstdb */ 1751eb633035STom Caputi bcopy(srcdb->db_buf->b_data, abuf->b_data, datalen); 1752eb633035STom Caputi dbuf_assign_arcbuf(dstdb, abuf, tx); 1753eb633035STom Caputi dmu_buf_rele(dst_handle, FTAG); 1754eb633035STom Caputi }
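/*
 * Illustrative sketch (not part of the original file): the typical
 * loaned-buffer write path.  The function name and parameters here are
 * hypothetical; zfs_write() is the real consumer of this interface.
 */
#ifdef DMU_USAGE_EXAMPLE	/* hypothetical guard; never defined */
static int
example_loaned_write(dmu_buf_t *db_handle, uint64_t off, int blksz,
    const void *data, dmu_tx_t *tx)
{
	/* Borrow an anonymous arc buffer sized to the target block. */
	arc_buf_t *abuf = dmu_request_arcbuf(db_handle, blksz);
	int err;

	bcopy(data, abuf->b_data, blksz);

	/*
	 * If 'off' and 'blksz' line up with the target block, the buffer
	 * is assigned directly; otherwise the DMU copies via dmu_write()
	 * and returns the loan itself.
	 */
	err = dmu_assign_arcbuf_by_dbuf(db_handle, off, abuf, tx);
	if (err != 0)
		dmu_return_arcbuf(abuf);	/* loan not consumed on error */
	return (err);
}
#endif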
1755eb633035STom Caputi 17562fdbea25SAleksandr Guzovskiy /* 17572fdbea25SAleksandr Guzovskiy * When possible, directly assign the passed loaned arc buffer to a dbuf. 17582fdbea25SAleksandr Guzovskiy * If this is not possible, copy the contents of the passed arc buf via 17592fdbea25SAleksandr Guzovskiy * dmu_write(). 17602fdbea25SAleksandr Guzovskiy */ 1761eb633035STom Caputi int 1762eb633035STom Caputi dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, 17632fdbea25SAleksandr Guzovskiy dmu_tx_t *tx) 17642fdbea25SAleksandr Guzovskiy { 17652fdbea25SAleksandr Guzovskiy dmu_buf_impl_t *db; 1766eb633035STom Caputi objset_t *os = dn->dn_objset; 1767eb633035STom Caputi uint64_t object = dn->dn_object; 17685602294fSDan Kimmel uint32_t blksz = (uint32_t)arc_buf_lsize(buf); 17692fdbea25SAleksandr Guzovskiy uint64_t blkid; 17702fdbea25SAleksandr Guzovskiy 17712fdbea25SAleksandr Guzovskiy rw_enter(&dn->dn_struct_rwlock, RW_READER); 1772a2cdcdd2SPaul Dagnelie blkid = dbuf_whichblock(dn, 0, offset); 1773eb633035STom Caputi db = dbuf_hold(dn, blkid, FTAG); 17762fdbea25SAleksandr Guzovskiy rw_exit(&dn->dn_struct_rwlock); 1774eb633035STom Caputi if (db == NULL) 1775eb633035STom Caputi return (SET_ERROR(EIO)); 17772fdbea25SAleksandr Guzovskiy 17788a904709SMatthew Ahrens /* 17798a904709SMatthew Ahrens * We can only assign if the offset is aligned, the arc buf is the 17805602294fSDan Kimmel * same size as the dbuf, and the dbuf is not metadata. 17818a904709SMatthew Ahrens */ 17825602294fSDan Kimmel if (offset == db->db.db_offset && blksz == db->db.db_size) { 17832fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(db, buf, tx); 17842fdbea25SAleksandr Guzovskiy dbuf_rele(db, FTAG); 17852fdbea25SAleksandr Guzovskiy } else { 17865602294fSDan Kimmel /* compressed bufs must always be assignable to their dbuf */ 17875602294fSDan Kimmel ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF); 17885602294fSDan Kimmel ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED)); 17895602294fSDan Kimmel 1790744947dcSTom Erickson os = dn->dn_objset; 1791744947dcSTom Erickson object = dn->dn_object; 17922fdbea25SAleksandr Guzovskiy dbuf_rele(db, FTAG); 1793744947dcSTom Erickson dmu_write(os, object, offset, blksz, buf->b_data, tx); 17942fdbea25SAleksandr Guzovskiy dmu_return_arcbuf(buf); 1795c242f9a0Schunli zhang - Sun Microsystems - Irvine United States XUIOSTAT_BUMP(xuiostat_wbuf_copied); 17962fdbea25SAleksandr Guzovskiy } 1797eb633035STom Caputi 1798eb633035STom Caputi return (0); 17992fdbea25SAleksandr Guzovskiy } 18002fdbea25SAleksandr Guzovskiy 1801eb633035STom Caputi int 1802eb633035STom Caputi dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf, 18038dfe5547SRichard Yao dmu_tx_t *tx) 18048dfe5547SRichard Yao { 1805eb633035STom Caputi int err; 18068dfe5547SRichard Yao dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle; 18078dfe5547SRichard Yao 18088dfe5547SRichard Yao DB_DNODE_ENTER(dbuf); 1809eb633035STom Caputi err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx); 18108dfe5547SRichard Yao DB_DNODE_EXIT(dbuf); 1811eb633035STom Caputi 1812eb633035STom Caputi return (err); 18138dfe5547SRichard Yao } 18148dfe5547SRichard Yao 1815c5c6ffa0Smaybee typedef struct { 1816b24ab676SJeff Bonwick dbuf_dirty_record_t *dsa_dr; 1817b24ab676SJeff Bonwick dmu_sync_cb_t *dsa_done; 1818b24ab676SJeff Bonwick zgd_t *dsa_zgd; 1819b24ab676SJeff Bonwick dmu_tx_t *dsa_tx; 1820c717a561Smaybee } dmu_sync_arg_t; 1821c5c6ffa0Smaybee 1822e14bb325SJeff Bonwick /* ARGSUSED */ 1823e14bb325SJeff Bonwick static void
1824e14bb325SJeff Bonwick dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) 1825e14bb325SJeff Bonwick { 1826b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = varg; 1827b24ab676SJeff Bonwick dmu_buf_t *db = dsa->dsa_zgd->zgd_db; 1828e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 1829975c32a0SNeil Perrin 1830b24ab676SJeff Bonwick if (zio->io_error == 0) { 1831b24ab676SJeff Bonwick if (BP_IS_HOLE(bp)) { 1832b24ab676SJeff Bonwick /* 1833b24ab676SJeff Bonwick * A block of zeros may compress to a hole, but the 1834b24ab676SJeff Bonwick * block size still needs to be known for replay. 1835b24ab676SJeff Bonwick */ 1836b24ab676SJeff Bonwick BP_SET_LSIZE(bp, db->db_size); 18375d7b4d43SMatthew Ahrens } else if (!BP_IS_EMBEDDED(bp)) { 1838b24ab676SJeff Bonwick ASSERT(BP_GET_LEVEL(bp) == 0); 1839eb633035STom Caputi BP_SET_FILL(bp, 1); 1840b24ab676SJeff Bonwick } 1841e14bb325SJeff Bonwick } 1842e14bb325SJeff Bonwick } 1843e14bb325SJeff Bonwick 1844b24ab676SJeff Bonwick static void 1845b24ab676SJeff Bonwick dmu_sync_late_arrival_ready(zio_t *zio) 1846b24ab676SJeff Bonwick { 1847b24ab676SJeff Bonwick dmu_sync_ready(zio, NULL, zio->io_private); 1848b24ab676SJeff Bonwick } 1849b24ab676SJeff Bonwick 1850c5c6ffa0Smaybee /* ARGSUSED */ 1851c5c6ffa0Smaybee static void 1852c5c6ffa0Smaybee dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) 1853c5c6ffa0Smaybee { 1854b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = varg; 1855b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = dsa->dsa_dr; 1856c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1857cab3a55eSPrakash Surya zgd_t *zgd = dsa->dsa_zgd; 1858cab3a55eSPrakash Surya 1859cab3a55eSPrakash Surya /* 1860cab3a55eSPrakash Surya * Record the vdev(s) backing this blkptr so they can be flushed after 1861cab3a55eSPrakash Surya * the writes for the lwb have completed. 1862cab3a55eSPrakash Surya */ 1863cab3a55eSPrakash Surya if (zio->io_error == 0) { 1864cab3a55eSPrakash Surya zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1865cab3a55eSPrakash Surya } 1866c5c6ffa0Smaybee 1867b50a0fe0SNeil Perrin mutex_enter(&db->db_mtx); 1868b50a0fe0SNeil Perrin ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC); 1869b24ab676SJeff Bonwick if (zio->io_error == 0) { 187080901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE); 187180901aeaSGeorge Wilson if (dr->dt.dl.dr_nopwrite) { 187280901aeaSGeorge Wilson blkptr_t *bp = zio->io_bp; 187380901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 187480901aeaSGeorge Wilson uint8_t chksum = BP_GET_CHECKSUM(bp_orig); 187580901aeaSGeorge Wilson 187680901aeaSGeorge Wilson ASSERT(BP_EQUAL(bp, bp_orig)); 1877b7edcb94SMatthew Ahrens VERIFY(BP_EQUAL(bp, db->db_blkptr)); 187880901aeaSGeorge Wilson ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF); 187945818ee1SMatthew Ahrens ASSERT(zio_checksum_table[chksum].ci_flags & 188045818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE); 188180901aeaSGeorge Wilson } 1882b24ab676SJeff Bonwick dr->dt.dl.dr_overridden_by = *zio->io_bp; 1883b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_OVERRIDDEN; 1884b24ab676SJeff Bonwick dr->dt.dl.dr_copies = zio->io_prop.zp_copies; 188570163ac5SPrakash Surya 188670163ac5SPrakash Surya /* 188770163ac5SPrakash Surya * Old style holes are filled with all zeros, whereas 188870163ac5SPrakash Surya * new-style holes maintain their lsize, type, level, 188970163ac5SPrakash Surya * and birth time (see zio_write_compress). 
While we 189070163ac5SPrakash Surya * need to reset the BP_SET_LSIZE() call that happened 189170163ac5SPrakash Surya * in dmu_sync_ready for old style holes, we do *not* 189270163ac5SPrakash Surya * want to wipe out the information contained in new 189370163ac5SPrakash Surya * style holes. Thus, only zero out the block pointer if 189470163ac5SPrakash Surya * it's an old style hole. 189570163ac5SPrakash Surya */ 189670163ac5SPrakash Surya if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) && 189770163ac5SPrakash Surya dr->dt.dl.dr_overridden_by.blk_birth == 0) 1898b24ab676SJeff Bonwick BP_ZERO(&dr->dt.dl.dr_overridden_by); 1899b24ab676SJeff Bonwick } else { 1900b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1901b24ab676SJeff Bonwick } 1902c5c6ffa0Smaybee cv_broadcast(&db->db_changed); 1903b50a0fe0SNeil Perrin mutex_exit(&db->db_mtx); 1904b50a0fe0SNeil Perrin 1905b24ab676SJeff Bonwick dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1906b24ab676SJeff Bonwick 1907b24ab676SJeff Bonwick kmem_free(dsa, sizeof (*dsa)); 1908b24ab676SJeff Bonwick } 1909b24ab676SJeff Bonwick 1910b24ab676SJeff Bonwick static void 1911b24ab676SJeff Bonwick dmu_sync_late_arrival_done(zio_t *zio) 1912b24ab676SJeff Bonwick { 1913b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 1914b24ab676SJeff Bonwick dmu_sync_arg_t *dsa = zio->io_private; 191580901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 1916cab3a55eSPrakash Surya zgd_t *zgd = dsa->dsa_zgd; 1917b24ab676SJeff Bonwick 1918cab3a55eSPrakash Surya if (zio->io_error == 0) { 1919cab3a55eSPrakash Surya /* 1920cab3a55eSPrakash Surya * Record the vdev(s) backing this blkptr so they can be 1921cab3a55eSPrakash Surya * flushed after the writes for the lwb have completed. 1922cab3a55eSPrakash Surya */ 1923cab3a55eSPrakash Surya zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1924cab3a55eSPrakash Surya 1925cab3a55eSPrakash Surya if (!BP_IS_HOLE(bp)) { 1926cab3a55eSPrakash Surya ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE)); 1927cab3a55eSPrakash Surya ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig)); 1928cab3a55eSPrakash Surya ASSERT(zio->io_bp->blk_birth == zio->io_txg); 1929cab3a55eSPrakash Surya ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa)); 1930cab3a55eSPrakash Surya zio_free(zio->io_spa, zio->io_txg, zio->io_bp); 1931cab3a55eSPrakash Surya } 1932b24ab676SJeff Bonwick } 1933b24ab676SJeff Bonwick 1934b24ab676SJeff Bonwick dmu_tx_commit(dsa->dsa_tx); 1935b24ab676SJeff Bonwick 1936b24ab676SJeff Bonwick dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1937b24ab676SJeff Bonwick 1938770499e1SDan Kimmel abd_put(zio->io_abd); 1939b24ab676SJeff Bonwick kmem_free(dsa, sizeof (*dsa)); 1940b24ab676SJeff Bonwick } 1941b24ab676SJeff Bonwick 1942b24ab676SJeff Bonwick static int 1943b24ab676SJeff Bonwick dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd, 19447802d7bfSMatthew Ahrens zio_prop_t *zp, zbookmark_phys_t *zb) 1945b24ab676SJeff Bonwick { 1946b24ab676SJeff Bonwick dmu_sync_arg_t *dsa; 1947b24ab676SJeff Bonwick dmu_tx_t *tx; 1948b24ab676SJeff Bonwick 1949b24ab676SJeff Bonwick tx = dmu_tx_create(os); 1950b24ab676SJeff Bonwick dmu_tx_hold_space(tx, zgd->zgd_db->db_size); 19516e1f5caaSNeil Perrin if (dmu_tx_assign(tx, TXG_WAIT) != 0) { 1952b24ab676SJeff Bonwick dmu_tx_abort(tx); 1953be6fd75aSMatthew Ahrens /* Make zl_get_data do txg_wait_synced() */ 1954be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 1955b24ab676SJeff Bonwick } 1956b24ab676SJeff Bonwick 19571271e4b1SPrakash Surya /* 19581271e4b1SPrakash Surya * In order to prevent
the zgd's lwb from being free'd prior to 19591271e4b1SPrakash Surya * dmu_sync_late_arrival_done() being called, we have to ensure 19601271e4b1SPrakash Surya * the lwb's "max txg" takes this tx's txg into account. 19611271e4b1SPrakash Surya */ 19621271e4b1SPrakash Surya zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx)); 19631271e4b1SPrakash Surya 1964b24ab676SJeff Bonwick dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 1965b24ab676SJeff Bonwick dsa->dsa_dr = NULL; 1966b24ab676SJeff Bonwick dsa->dsa_done = done; 1967b24ab676SJeff Bonwick dsa->dsa_zgd = zgd; 1968b24ab676SJeff Bonwick dsa->dsa_tx = tx; 1969c717a561Smaybee 1970b7edcb94SMatthew Ahrens /* 1971b7edcb94SMatthew Ahrens * Since we are currently syncing this txg, it's nontrivial to 1972b7edcb94SMatthew Ahrens * determine what BP to nopwrite against, so we disable nopwrite. 1973b7edcb94SMatthew Ahrens * 1974b7edcb94SMatthew Ahrens * When syncing, the db_blkptr is initially the BP of the previous 1975b7edcb94SMatthew Ahrens * txg. We cannot nopwrite against it because it will be changed 1976b7edcb94SMatthew Ahrens * (this is similar to the non-late-arrival case where the dbuf is 1977b7edcb94SMatthew Ahrens * dirty in a future txg). 1978b7edcb94SMatthew Ahrens * 1979b7edcb94SMatthew Ahrens * Then dbuf_write_ready() sets db_blkptr to the location we will write. 1980b7edcb94SMatthew Ahrens * We cannot nopwrite against it because although the BP will not 1981b7edcb94SMatthew Ahrens * (typically) be changed, the data has not yet been persisted to this 1982b7edcb94SMatthew Ahrens * location. 1983b7edcb94SMatthew Ahrens * 1984b7edcb94SMatthew Ahrens * Finally, when dbuf_write_done() is called, it is theoretically 1985b7edcb94SMatthew Ahrens * possible to always nopwrite, because the data that was written in 1986b7edcb94SMatthew Ahrens * this txg is the same data that we are trying to write. However we 1987b7edcb94SMatthew Ahrens * would need to check that this dbuf is not dirty in any future 1988b7edcb94SMatthew Ahrens * txg's (as we do in the normal dmu_sync() path). For simplicity, we 1989b7edcb94SMatthew Ahrens * don't nopwrite in this case. 1990b7edcb94SMatthew Ahrens */ 1991b7edcb94SMatthew Ahrens zp->zp_nopwrite = B_FALSE; 1992b7edcb94SMatthew Ahrens 19935602294fSDan Kimmel zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp, 1994770499e1SDan Kimmel abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size), 1995770499e1SDan Kimmel zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp, 1996770499e1SDan Kimmel dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done, 1997770499e1SDan Kimmel dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb)); 1998b24ab676SJeff Bonwick 1999b24ab676SJeff Bonwick return (0); 2000c5c6ffa0Smaybee } 2001c5c6ffa0Smaybee 2002fa9e4066Sahrens /* 2003c5c6ffa0Smaybee * Intent log support: sync the block associated with db to disk. 2004c5c6ffa0Smaybee * N.B. and XXX: the caller is responsible for making sure that the 2005c5c6ffa0Smaybee * data isn't changing while dmu_sync() is writing it. 2006fa9e4066Sahrens * 2007fa9e4066Sahrens * Return values: 2008fa9e4066Sahrens * 200980901aeaSGeorge Wilson * EEXIST: this txg has already been synced, so there's nothing to do. 2010fa9e4066Sahrens * The caller should not log the write. 2011fa9e4066Sahrens * 2012fa9e4066Sahrens * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. 2013fa9e4066Sahrens * The caller should not log the write.
2014fa9e4066Sahrens * 2015c5c6ffa0Smaybee * EALREADY: this block is already in the process of being synced. 2016c5c6ffa0Smaybee * The caller should track its progress (somehow). 2017fa9e4066Sahrens * 2018b24ab676SJeff Bonwick * EIO: could not do the I/O. 2019b24ab676SJeff Bonwick * The caller should do a txg_wait_synced(). 2020fa9e4066Sahrens * 2021b24ab676SJeff Bonwick * 0: the I/O has been initiated. 2022b24ab676SJeff Bonwick * The caller should log this blkptr in the done callback. 2023b24ab676SJeff Bonwick * It is possible that the I/O will fail, in which case 2024b24ab676SJeff Bonwick * the error will be reported to the done callback and 2025b24ab676SJeff Bonwick * propagated to pio from zio_done(). 2026fa9e4066Sahrens */ 2027fa9e4066Sahrens int 2028b24ab676SJeff Bonwick dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) 2029fa9e4066Sahrens { 2030b24ab676SJeff Bonwick dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db; 2031503ad85cSMatthew Ahrens objset_t *os = db->db_objset; 2032b24ab676SJeff Bonwick dsl_dataset_t *ds = os->os_dsl_dataset; 2033c717a561Smaybee dbuf_dirty_record_t *dr; 2034b24ab676SJeff Bonwick dmu_sync_arg_t *dsa; 20357802d7bfSMatthew Ahrens zbookmark_phys_t zb; 2036b24ab676SJeff Bonwick zio_prop_t zp; 2037744947dcSTom Erickson dnode_t *dn; 2038fa9e4066Sahrens 2039b24ab676SJeff Bonwick ASSERT(pio != NULL); 2040fa9e4066Sahrens ASSERT(txg != 0); 2041fa9e4066Sahrens 2042b24ab676SJeff Bonwick SET_BOOKMARK(&zb, ds->ds_object, 2043b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 2044b24ab676SJeff Bonwick 2045744947dcSTom Erickson DB_DNODE_ENTER(db); 2046744947dcSTom Erickson dn = DB_DNODE(db); 2047adaec86aSMatthew Ahrens dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp); 2048744947dcSTom Erickson DB_DNODE_EXIT(db); 2049fa9e4066Sahrens 2050ea8dc4b6Seschrock /* 2051b24ab676SJeff Bonwick * If we're frozen (running ziltest), we always need to generate a bp. 2052ea8dc4b6Seschrock */ 2053b24ab676SJeff Bonwick if (txg > spa_freeze_txg(os->os_spa)) 2054b24ab676SJeff Bonwick return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 2055ea8dc4b6Seschrock 2056fa9e4066Sahrens /* 2057b24ab676SJeff Bonwick * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf() 2058b24ab676SJeff Bonwick * and us. If we determine that this txg is not yet syncing, 2059b24ab676SJeff Bonwick * but it begins to sync a moment later, that's OK because the 2060b24ab676SJeff Bonwick * sync thread will block in dbuf_sync_leaf() until we drop db_mtx. 2061fa9e4066Sahrens */ 2062b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 2063b24ab676SJeff Bonwick 2064b24ab676SJeff Bonwick if (txg <= spa_last_synced_txg(os->os_spa)) { 2065fa9e4066Sahrens /* 2066b24ab676SJeff Bonwick * This txg has already synced. There's nothing to do. 2067fa9e4066Sahrens */ 2068b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 2069be6fd75aSMatthew Ahrens return (SET_ERROR(EEXIST)); 2070fa9e4066Sahrens } 2071fa9e4066Sahrens 2072b24ab676SJeff Bonwick if (txg <= spa_syncing_txg(os->os_spa)) { 2073b24ab676SJeff Bonwick /* 2074b24ab676SJeff Bonwick * This txg is currently syncing, so we can't mess with 2075b24ab676SJeff Bonwick * the dirty record anymore; just write a new log block. 
2076b24ab676SJeff Bonwick */ 2077b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 2078b24ab676SJeff Bonwick return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 2079fa9e4066Sahrens } 2080fa9e4066Sahrens 2081c717a561Smaybee dr = db->db_last_dirty; 2082b24ab676SJeff Bonwick while (dr && dr->dr_txg != txg) 2083c717a561Smaybee dr = dr->dr_next; 2084b24ab676SJeff Bonwick 2085b24ab676SJeff Bonwick if (dr == NULL) { 2086c5c6ffa0Smaybee /* 2087b24ab676SJeff Bonwick * There's no dr for this dbuf, so it must have been freed. 2088c5c6ffa0Smaybee * There's no need to log writes to freed blocks, so we're done. 2089c5c6ffa0Smaybee */ 2090c5c6ffa0Smaybee mutex_exit(&db->db_mtx); 2091be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 2092c5c6ffa0Smaybee } 2093c5c6ffa0Smaybee 209480901aeaSGeorge Wilson ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg); 209580901aeaSGeorge Wilson 2096b7edcb94SMatthew Ahrens if (db->db_blkptr != NULL) { 2097b7edcb94SMatthew Ahrens /* 2098b7edcb94SMatthew Ahrens * We need to fill in zgd_bp with the current blkptr so that 2099b7edcb94SMatthew Ahrens * the nopwrite code can check if we're writing the same 2100b7edcb94SMatthew Ahrens * data that's already on disk. We can only nopwrite if we 2101b7edcb94SMatthew Ahrens * are sure that after making the copy, db_blkptr will not 2102b7edcb94SMatthew Ahrens * change until our i/o completes. We ensure this by 2103b7edcb94SMatthew Ahrens * holding the db_mtx, and only allowing nopwrite if the 2104b7edcb94SMatthew Ahrens * block is not already dirty (see below). This is verified 2105b7edcb94SMatthew Ahrens * by dmu_sync_done(), which VERIFYs that the db_blkptr has 2106b7edcb94SMatthew Ahrens * not changed. 2107b7edcb94SMatthew Ahrens */ 2108b7edcb94SMatthew Ahrens *zgd->zgd_bp = *db->db_blkptr; 2109b7edcb94SMatthew Ahrens } 2110b7edcb94SMatthew Ahrens 211180901aeaSGeorge Wilson /* 211234e8acefSMatthew Ahrens * Assume the on-disk data is X, the current syncing data (in 211334e8acefSMatthew Ahrens * txg - 1) is Y, and the current in-memory data is Z (currently 211434e8acefSMatthew Ahrens * in dmu_sync). 211534e8acefSMatthew Ahrens * 211634e8acefSMatthew Ahrens * We usually want to perform a nopwrite if X and Z are the 211734e8acefSMatthew Ahrens * same. However, if Y is different (i.e. the BP is going to 211834e8acefSMatthew Ahrens * change before this write takes effect), then a nopwrite will 211934e8acefSMatthew Ahrens * be incorrect - we would override with X, which could have 212034e8acefSMatthew Ahrens * been freed when Y was written. 212134e8acefSMatthew Ahrens * 212234e8acefSMatthew Ahrens * (Note that this is not a concern when we are nop-writing from 212334e8acefSMatthew Ahrens * syncing context, because X and Y must be identical, because 212434e8acefSMatthew Ahrens * all previous txgs have been synced.) 212534e8acefSMatthew Ahrens * 212634e8acefSMatthew Ahrens * Therefore, we disable nopwrite if the current BP could change 212734e8acefSMatthew Ahrens * before this TXG. There are two ways it could change: by 212834e8acefSMatthew Ahrens * being dirty (dr_next is non-NULL), or by being freed 212934e8acefSMatthew Ahrens * (dnode_block_freed()). This behavior is verified by 213034e8acefSMatthew Ahrens * zio_done(), which VERIFYs that the override BP is identical 213134e8acefSMatthew Ahrens * to the on-disk BP. 
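 *
 * As a concrete illustration: if X == Z but Y != X, a nopwrite here
 * would make the log record point at X, a block that the sync of Y
 * is about to free.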
213280901aeaSGeorge Wilson */ 213334e8acefSMatthew Ahrens DB_DNODE_ENTER(db); 213434e8acefSMatthew Ahrens dn = DB_DNODE(db); 213534e8acefSMatthew Ahrens if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid)) 213680901aeaSGeorge Wilson zp.zp_nopwrite = B_FALSE; 213734e8acefSMatthew Ahrens DB_DNODE_EXIT(db); 213880901aeaSGeorge Wilson 2139c717a561Smaybee ASSERT(dr->dr_txg == txg); 2140b24ab676SJeff Bonwick if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC || 2141b24ab676SJeff Bonwick dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 2142c717a561Smaybee /* 2143b24ab676SJeff Bonwick * We have already issued a sync write for this buffer, 2144b24ab676SJeff Bonwick * or this buffer has already been synced. It could not 2145c717a561Smaybee * have been dirtied since, or we would have cleared the state. 2146c717a561Smaybee */ 2147c717a561Smaybee mutex_exit(&db->db_mtx); 2148be6fd75aSMatthew Ahrens return (SET_ERROR(EALREADY)); 2149c717a561Smaybee } 2150c717a561Smaybee 2151b24ab676SJeff Bonwick ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 2152c717a561Smaybee dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; 2153fa9e4066Sahrens mutex_exit(&db->db_mtx); 2154e14bb325SJeff Bonwick 2155b24ab676SJeff Bonwick dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 2156b24ab676SJeff Bonwick dsa->dsa_dr = dr; 2157b24ab676SJeff Bonwick dsa->dsa_done = done; 2158b24ab676SJeff Bonwick dsa->dsa_zgd = zgd; 2159b24ab676SJeff Bonwick dsa->dsa_tx = NULL; 2160e14bb325SJeff Bonwick 2161b24ab676SJeff Bonwick zio_nowait(arc_write(pio, os->os_spa, txg, 2162b7edcb94SMatthew Ahrens zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db), 2163dcbf3bd6SGeorge Wilson &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa, 21648df0bcf0SPaul Dagnelie ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb)); 2165e14bb325SJeff Bonwick 2166b24ab676SJeff Bonwick return (0); 2167fa9e4066Sahrens } 2168fa9e4066Sahrens 2169eb633035STom Caputi int 2170eb633035STom Caputi dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx) 2171eb633035STom Caputi { 2172eb633035STom Caputi dnode_t *dn; 2173eb633035STom Caputi int err; 2174eb633035STom Caputi 2175eb633035STom Caputi err = dnode_hold(os, object, FTAG, &dn); 2176eb633035STom Caputi if (err) 2177eb633035STom Caputi return (err); 2178eb633035STom Caputi err = dnode_set_nlevels(dn, nlevels, tx); 2179eb633035STom Caputi dnode_rele(dn, FTAG); 2180eb633035STom Caputi return (err); 2181eb633035STom Caputi } 2182eb633035STom Caputi 2183fa9e4066Sahrens int 2184fa9e4066Sahrens dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs, 21859a686fbcSPaul Dagnelie dmu_tx_t *tx) 2186fa9e4066Sahrens { 2187ea8dc4b6Seschrock dnode_t *dn; 2188ea8dc4b6Seschrock int err; 2189ea8dc4b6Seschrock 2190503ad85cSMatthew Ahrens err = dnode_hold(os, object, FTAG, &dn); 2191ea8dc4b6Seschrock if (err) 2192ea8dc4b6Seschrock return (err); 2193ea8dc4b6Seschrock err = dnode_set_blksz(dn, size, ibs, tx); 2194fa9e4066Sahrens dnode_rele(dn, FTAG); 2195fa9e4066Sahrens return (err); 2196fa9e4066Sahrens } 2197fa9e4066Sahrens 2198eb633035STom Caputi int 2199eb633035STom Caputi dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid, 2200eb633035STom Caputi dmu_tx_t *tx) 2201eb633035STom Caputi { 2202eb633035STom Caputi dnode_t *dn; 2203eb633035STom Caputi int err; 2204eb633035STom Caputi 2205eb633035STom Caputi err = dnode_hold(os, object, FTAG, &dn); 2206eb633035STom Caputi if (err) 2207eb633035STom Caputi return (err); 2208eb633035STom Caputi 
rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2209eb633035STom Caputi dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE); 2210eb633035STom Caputi rw_exit(&dn->dn_struct_rwlock); 2211eb633035STom Caputi dnode_rele(dn, FTAG); 2212eb633035STom Caputi return (0); 2213eb633035STom Caputi } 2214eb633035STom Caputi 2215fa9e4066Sahrens void 2216fa9e4066Sahrens dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, 22179a686fbcSPaul Dagnelie dmu_tx_t *tx) 2218fa9e4066Sahrens { 2219ea8dc4b6Seschrock dnode_t *dn; 2220ea8dc4b6Seschrock 22215d7b4d43SMatthew Ahrens /* 22225d7b4d43SMatthew Ahrens * Send streams include each object's checksum function. This 22235d7b4d43SMatthew Ahrens * check ensures that the receiving system can understand the 22245d7b4d43SMatthew Ahrens * checksum function transmitted. 22255d7b4d43SMatthew Ahrens */ 22265d7b4d43SMatthew Ahrens ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS); 22275d7b4d43SMatthew Ahrens 22285d7b4d43SMatthew Ahrens VERIFY0(dnode_hold(os, object, FTAG, &dn)); 22295d7b4d43SMatthew Ahrens ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS); 2230fa9e4066Sahrens dn->dn_checksum = checksum; 2231fa9e4066Sahrens dnode_setdirty(dn, tx); 2232fa9e4066Sahrens dnode_rele(dn, FTAG); 2233fa9e4066Sahrens } 2234fa9e4066Sahrens 2235fa9e4066Sahrens void 2236fa9e4066Sahrens dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, 22379a686fbcSPaul Dagnelie dmu_tx_t *tx) 2238fa9e4066Sahrens { 2239ea8dc4b6Seschrock dnode_t *dn; 2240ea8dc4b6Seschrock 22415d7b4d43SMatthew Ahrens /* 22425d7b4d43SMatthew Ahrens * Send streams include each object's compression function. This 22435d7b4d43SMatthew Ahrens * check ensures that the receiving system can understand the 22445d7b4d43SMatthew Ahrens * compression function transmitted. 22455d7b4d43SMatthew Ahrens */ 22465d7b4d43SMatthew Ahrens ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS); 22475d7b4d43SMatthew Ahrens 22485d7b4d43SMatthew Ahrens VERIFY0(dnode_hold(os, object, FTAG, &dn)); 2249fa9e4066Sahrens dn->dn_compress = compress; 2250fa9e4066Sahrens dnode_setdirty(dn, tx); 2251fa9e4066Sahrens dnode_rele(dn, FTAG); 2252fa9e4066Sahrens } 2253fa9e4066Sahrens 2254edf345e6SMatthew Ahrens /* 2255edf345e6SMatthew Ahrens * When the "redundant_metadata" property is set to "most", only indirect 2256edf345e6SMatthew Ahrens * blocks of this level and higher will have an additional ditto block. 2257edf345e6SMatthew Ahrens */ 2258edf345e6SMatthew Ahrens int zfs_redundant_metadata_most_ditto_level = 2; 2259edf345e6SMatthew Ahrens 2260b24ab676SJeff Bonwick void 2261adaec86aSMatthew Ahrens dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) 2262b24ab676SJeff Bonwick { 2263b24ab676SJeff Bonwick dmu_object_type_t type = dn ? 
/*
 * When the "redundant_metadata" property is set to "most", only indirect
 * blocks of this level and higher will have an additional ditto block.
 */
int zfs_redundant_metadata_most_ditto_level = 2;

void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
	    (wp & WP_SPILL));
	enum zio_checksum checksum = os->os_checksum;
	enum zio_compress compress = os->os_compress;
	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
	boolean_t dedup = B_FALSE;
	boolean_t nopwrite = B_FALSE;
	boolean_t dedup_verify = os->os_dedup_verify;
	boolean_t encrypt = B_FALSE;
	int copies = os->os_copies;

	/*
	 * We maintain different write policies for each of the following
	 * types of data:
	 *	 1. metadata
	 *	 2. preallocated blocks (i.e. level-0 blocks of a dump device)
	 *	 3. all other level 0 blocks
	 */
	if (ismd) {
		/*
		 * XXX -- we should design a compression algorithm
		 * that specializes in arrays of bps.
		 */
		compress = zio_compress_select(os->os_spa,
		    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);

		/*
		 * Metadata always gets checksummed. If the data
		 * checksum is multi-bit correctable, and it's not a
		 * ZBT-style checksum, then it's suitable for metadata
		 * as well. Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (!(zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_METADATA) ||
		    (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_EMBEDDED))
			checksum = ZIO_CHECKSUM_FLETCHER_4;

		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
		    (os->os_redundant_metadata ==
		    ZFS_REDUNDANT_METADATA_MOST &&
		    (level >= zfs_redundant_metadata_most_ditto_level ||
		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
			copies++;
	} else if (wp & WP_NOFILL) {
		ASSERT(level == 0);

		/*
		 * If we're writing preallocated blocks, we aren't actually
		 * writing them so don't set any policy properties. These
		 * blocks are currently only used by an external subsystem
		 * outside of zfs (i.e. dump) and not written by the zio
		 * pipeline.
		 */
		compress = ZIO_COMPRESS_OFF;
		checksum = ZIO_CHECKSUM_NOPARITY;
	} else {
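		/*
		 * Ordinary level-0 data: start from the dataset-wide
		 * properties and apply any per-object overrides recorded
		 * in the dnode (dn_compress, dn_checksum).
		 */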
		compress = zio_compress_select(os->os_spa, dn->dn_compress,
		    compress);

		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
		    zio_checksum_select(dn->dn_checksum, checksum) :
		    dedup_checksum;

		/*
		 * Determine dedup setting. If we are in dmu_sync(),
		 * we won't actually dedup now because that's all
		 * done in syncing context; but we do want to use the
		 * dedup checksum. If the checksum is not strong
		 * enough to ensure unique signatures, force
		 * dedup_verify.
		 */
		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
			if (!(zio_checksum_table[checksum].ci_flags &
			    ZCHECKSUM_FLAG_DEDUP))
				dedup_verify = B_TRUE;
		}

		/*
		 * Enable nopwrite if we have a secure enough checksum
		 * algorithm (see comment in zio_nop_write) and
		 * compression is enabled. We don't enable nopwrite if
		 * dedup is enabled as the two features are mutually
		 * exclusive.
		 */
		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_NOPWRITE) &&
		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
	}

	/*
	 * All objects in an encrypted objset are protected from modification
	 * via a MAC. Encrypted objects store their IV and salt in the last
	 * DVA in the bp, so we cannot use all copies. Encrypted objects are
	 * also not subject to nopwrite since writing the same data will still
	 * result in a new ciphertext. Only encrypted blocks can be dedup'd
	 * to avoid ambiguity in the dedup code since the DDT does not store
	 * object types.
	 */
	if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
		encrypt = B_TRUE;

		if (DMU_OT_IS_ENCRYPTED(type)) {
			copies = MIN(copies, SPA_DVAS_PER_BP - 1);
			nopwrite = B_FALSE;
		} else {
			dedup = B_FALSE;
		}

		if (level <= 0 &&
		    (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
			compress = ZIO_COMPRESS_EMPTY;
		}
	}
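	/*
	 * Commit the chosen policy into the caller's zio_prop_t. The
	 * salt, IV, and MAC are zeroed here; for encrypted blocks they
	 * are filled in later by the encryption code in the zio pipeline.
	 */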
	zp->zp_compress = compress;
	zp->zp_checksum = checksum;
	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
	zp->zp_level = level;
	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
	zp->zp_dedup = dedup;
	zp->zp_dedup_verify = dedup && dedup_verify;
	zp->zp_nopwrite = nopwrite;
	zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
	    os->os_zpl_special_smallblock : 0;
	zp->zp_encrypt = encrypt;
	zp->zp_byteorder = ZFS_HOST_BYTEORDER;
	bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
	bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
	bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
}

int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int err;

	/*
	 * Sync any current changes before
	 * we go trundling through the block pointers.
	 */
	err = dmu_object_wait_synced(os, object);
	if (err) {
		return (err);
	}

	err = dnode_hold(os, object, FTAG, &dn);
	if (err) {
		return (err);
	}

	err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	dnode_rele(dn, FTAG);

	return (err);
}
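/*
 * Illustrative sketch only: dmu_offset_next() is the kind of backend an
 * lseek(SEEK_HOLE)/lseek(SEEK_DATA) consumer would use. On success *off
 * is advanced in place to the start of the next hole (or data); ESRCH
 * means there is none past *off. The "example_" name and DMU_EXAMPLES
 * guard are hypothetical.
 */
#ifdef DMU_EXAMPLES
static int
example_next_hole(objset_t *os, uint64_t object, uint64_t *off)
{
	int err = dmu_offset_next(os, object, B_TRUE, off);

	if (err == ESRCH) {
		/* No hole past *off; a real caller might report EOF. */
		err = 0;
	}
	return (err);
}
#endif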
/*
 * Given a ZFS object, if it contains any dirty nodes, this function
 * flushes all dirty blocks to disk so that the DMU object info is
 * up to date. A more efficient future version might just find the
 * TXG with the maximum ID and wait for that to be synced.
 */
int
dmu_object_wait_synced(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	int error, i;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error) {
		return (error);
	}

	for (i = 0; i < TXG_SIZE; i++) {
		if (list_link_active(&dn->dn_dirty_link[i])) {
			break;
		}
	}
	dnode_rele(dn, FTAG);
	if (i != TXG_SIZE) {
		txg_wait_synced(dmu_objset_pool(os), 0);
	}

	return (0);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	dnode_phys_t *dnp;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	dnp = dn->dn_phys;

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_type = dn->dn_bonustype;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_nblkptr = dn->dn_nblkptr;
	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	doi->doi_fill_count = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}
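/*
 * Illustrative sketch only: a NULL doi turns dmu_object_info() into a
 * pure existence check, and doi_physical_blocks_512 converts to bytes
 * by shifting left 9 (512-byte units). The "example_" names and the
 * DMU_EXAMPLES guard are hypothetical.
 */
#ifdef DMU_EXAMPLES
static boolean_t
example_object_exists(objset_t *os, uint64_t object)
{
	return (dmu_object_info(os, object, NULL) == 0);
}

static int
example_object_bytes_used(objset_t *os, uint64_t object, uint64_t *bytes)
{
	dmu_object_info_t doi;
	int err = dmu_object_info(os, object, &doi);

	if (err)
		return (err);
	*bytes = doi.doi_physical_blocks_512 << 9;
	return (0);
}
#endif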
/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	DB_DNODE_ENTER(db);
	dmu_object_info_from_dnode(DB_DNODE(db), doi);
	DB_DNODE_EXIT(db);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
    u_longlong_t *nblk512)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	*blksize = dn->dn_datablksz;
	/* add in number of slots used for the dnode itself */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
	DB_DNODE_EXIT(db);
}

void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
	DB_DNODE_EXIT(db);
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}
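/*
 * Illustrative sketch only: these helpers swap whole arrays in place,
 * e.g. when interpreting data written by a machine of the opposite
 * endianness; 0x0102030405060708 becomes 0x0807060504030201. Note the
 * size argument is in bytes and must be a multiple of the element size.
 * The "example_" name and DMU_EXAMPLES guard are hypothetical.
 */
#ifdef DMU_EXAMPLES
static void
example_byteswap(uint64_t *vals, size_t nvals)
{
	byteswap_uint64_array(vals, nvals * sizeof (uint64_t));
}
#endif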
2597fa9e4066Sahrens void 2598fa9e4066Sahrens dmu_init(void) 2599fa9e4066Sahrens { 2600770499e1SDan Kimmel abd_init(); 26013f9d6ad7SLin Ling zfs_dbgmsg_init(); 2602744947dcSTom Erickson sa_cache_init(); 2603744947dcSTom Erickson xuio_stat_init(); 2604744947dcSTom Erickson dmu_objset_init(); 2605fa9e4066Sahrens dnode_init(); 26067cbf8b43SRich Morris zfetch_init(); 2607fa94a07fSbrendan l2arc_init(); 2608ce636f8bSMatthew Ahrens arc_init(); 2609dcbf3bd6SGeorge Wilson dbuf_init(); 2610fa9e4066Sahrens } 2611fa9e4066Sahrens 2612fa9e4066Sahrens void 2613fa9e4066Sahrens dmu_fini(void) 2614fa9e4066Sahrens { 26153e30c24aSWill Andrews arc_fini(); /* arc depends on l2arc, so arc must go first */ 2616ce636f8bSMatthew Ahrens l2arc_fini(); 26177cbf8b43SRich Morris zfetch_fini(); 2618fa9e4066Sahrens dbuf_fini(); 2619744947dcSTom Erickson dnode_fini(); 2620744947dcSTom Erickson dmu_objset_fini(); 2621c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_fini(); 26220a586ceaSMark Shellenbaum sa_cache_fini(); 26233f9d6ad7SLin Ling zfs_dbgmsg_fini(); 2624770499e1SDan Kimmel abd_fini(); 2625fa9e4066Sahrens } 2626