/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2013 DEY Storage Systems, Inc.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#ifndef	_SYS_DMU_H
#define	_SYS_DMU_H

/*
 * This file describes the interface that the DMU provides for its
 * consumers.
 *
 * The DMU also interacts with the SPA. That interface is described in
 * dmu_spa.h.
 */

#include <sys/zfs_context.h>
#include <sys/inttypes.h>
#include <sys/cred.h>
#include <sys/fs/zfs.h>
#include <sys/zio_compress.h>
#include <sys/zio_priority.h>

#ifdef	__cplusplus
extern "C" {
#endif

struct uio;
struct xuio;
struct page;
struct vnode;
struct spa;
struct zilog;
struct zio;
struct blkptr;
struct zap_cursor;
struct dsl_dataset;
struct dsl_pool;
struct dnode;
struct drr_begin;
struct drr_end;
struct zbookmark_phys;
struct spa;
struct nvlist;
struct arc_buf;
struct zio_prop;
struct sa_handle;

typedef struct objset objset_t;
typedef struct dmu_tx dmu_tx_t;
typedef struct dsl_dir dsl_dir_t;
typedef struct dnode dnode_t;

typedef enum dmu_object_byteswap {
	DMU_BSWAP_UINT8,
	DMU_BSWAP_UINT16,
	DMU_BSWAP_UINT32,
	DMU_BSWAP_UINT64,
	DMU_BSWAP_ZAP,
	DMU_BSWAP_DNODE,
	DMU_BSWAP_OBJSET,
	DMU_BSWAP_ZNODE,
	DMU_BSWAP_OLDACL,
	DMU_BSWAP_ACL,
	/*
	 * Allocating a new byteswap type number makes the on-disk format
	 * incompatible with any other format that uses the same number.
	 *
	 * Data can usually be structured to work with one of the
	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
	 */
	DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;

#define	DMU_OT_NEWTYPE 0x80
#define	DMU_OT_METADATA 0x40
#define	DMU_OT_BYTESWAP_MASK 0x3f

/*
 * Defines a uint8_t object type. Object types specify if the data
 * in the object is metadata (boolean) and how to byteswap the data
 * (dmu_object_byteswap_t). All of the types created by this method
 * are cached in the dbuf metadata cache.
 */
#define	DMU_OT(byteswap, metadata) \
	(DMU_OT_NEWTYPE | \
	((metadata) ? DMU_OT_METADATA : 0) | \
	((byteswap) & DMU_OT_BYTESWAP_MASK))

#define	DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
	(ot) < DMU_OT_NUMTYPES)

#define	DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_METADATA) : \
	dmu_ot[(ot)].ot_metadata)

#define	DMU_OT_IS_METADATA_CACHED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	B_TRUE : dmu_ot[(ot)].ot_dbuf_metadata_cache)

/*
 * These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
 * have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
 * is repurposed for embedded BPs.
 */
#define	DMU_OT_HAS_FILL(ot) \
	((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)

#define	DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) : \
	dmu_ot[(ot)].ot_byteswap)

typedef enum dmu_object_type {
	DMU_OT_NONE,
	/* general: */
	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
	DMU_OT_BPOBJ,			/* UINT64 */
	DMU_OT_BPOBJ_HDR,		/* UINT64 */
	/* spa: */
	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
	DMU_OT_SPACE_MAP,		/* UINT64 */
	/* zil: */
	DMU_OT_INTENT_LOG,		/* UINT64 */
	/* dmu: */
	DMU_OT_DNODE,			/* DNODE */
	DMU_OT_OBJSET,			/* OBJSET */
	/* dsl: */
	DMU_OT_DSL_DIR,			/* UINT64 */
	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
	DMU_OT_DSL_PROPS,		/* ZAP */
	DMU_OT_DSL_DATASET,		/* UINT64 */
	/* zpl: */
	DMU_OT_ZNODE,			/* ZNODE */
	DMU_OT_OLDACL,			/* Old ACL */
	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
	DMU_OT_MASTER_NODE,		/* ZAP */
	DMU_OT_UNLINKED_SET,		/* ZAP */
	/* zvol: */
	DMU_OT_ZVOL,			/* UINT8 */
	DMU_OT_ZVOL_PROP,		/* ZAP */
	/* other; for testing only! */
	DMU_OT_PLAIN_OTHER,		/* UINT8 */
	DMU_OT_UINT64_OTHER,		/* UINT64 */
	DMU_OT_ZAP_OTHER,		/* ZAP */
	/* new object types: */
	DMU_OT_ERROR_LOG,		/* ZAP */
	DMU_OT_SPA_HISTORY,		/* UINT8 */
	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
	DMU_OT_POOL_PROPS,		/* ZAP */
	DMU_OT_DSL_PERMS,		/* ZAP */
	DMU_OT_ACL,			/* ACL */
	DMU_OT_SYSACL,			/* SYSACL */
	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
	DMU_OT_NEXT_CLONES,		/* ZAP */
	DMU_OT_SCAN_QUEUE,		/* ZAP */
	DMU_OT_USERGROUP_USED,		/* ZAP */
	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
	DMU_OT_USERREFS,		/* ZAP */
	DMU_OT_DDT_ZAP,			/* ZAP */
	DMU_OT_DDT_STATS,		/* ZAP */
	DMU_OT_SA,			/* System attr */
	DMU_OT_SA_MASTER_NODE,		/* ZAP */
	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
	DMU_OT_SCAN_XLATE,		/* ZAP */
	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
	DMU_OT_DEADLIST,		/* ZAP */
	DMU_OT_DEADLIST_HDR,		/* UINT64 */
	DMU_OT_DSL_CLONES,		/* ZAP */
	DMU_OT_BPOBJ_SUBOBJ,		/* UINT64 */
	/*
	 * Do not allocate new object types here. Doing so makes the on-disk
	 * format incompatible with any other format that uses the same object
	 * type number.
	 *
	 * When creating an object which does not have one of the above types
	 * use the DMU_OTN_* type with the correct byteswap and metadata
	 * values.
	 *
	 * The DMU_OTN_* types do not have entries in the dmu_ot table;
	 * use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
	 * of indexing into dmu_ot directly (this works for both DMU_OT_* types
	 * and DMU_OTN_* types).
	 */
	DMU_OT_NUMTYPES,

	/*
	 * Names for valid types declared with DMU_OT().
	 */
	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE),
} dmu_object_type_t;

/*
 * These flags are intended to be used to specify the "txg_how"
 * parameter when calling the dmu_tx_assign() function. See the comment
 * above dmu_tx_assign() for more details on the meaning of these flags.
 */
#define	TXG_NOWAIT	(0ULL)
#define	TXG_WAIT	(1ULL<<0)
#define	TXG_NOTHROTTLE	(1ULL<<1)

void byteswap_uint64_array(void *buf, size_t size);
void byteswap_uint32_array(void *buf, size_t size);
void byteswap_uint16_array(void *buf, size_t size);
void byteswap_uint8_array(void *buf, size_t size);
void zap_byteswap(void *buf, size_t size);
void zfs_oldacl_byteswap(void *buf, size_t size);
void zfs_acl_byteswap(void *buf, size_t size);
void zfs_znode_byteswap(void *buf, size_t size);

#define	DS_FIND_SNAPSHOTS	(1<<0)
#define	DS_FIND_CHILDREN	(1<<1)
#define	DS_FIND_SERIALIZE	(1<<2)

/*
 * The maximum number of bytes that can be accessed as part of one
 * operation, including metadata.
 */
#define	DMU_MAX_ACCESS (32 * 1024 * 1024) /* 32MB */
#define	DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */

#define	DMU_USERUSED_OBJECT	(-1ULL)
#define	DMU_GROUPUSED_OBJECT	(-2ULL)

/*
 * artificial blkids for bonus buffer and spill blocks
 */
#define	DMU_BONUS_BLKID		(-1ULL)
#define	DMU_SPILL_BLKID		(-2ULL)
/*
 * Public routines to create, destroy, open, and close objsets.
 */
int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp);
void dmu_objset_rele(objset_t *os, void *tag);
void dmu_objset_disown(objset_t *os, void *tag);
int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp);

void dmu_objset_evict_dbufs(objset_t *os);
int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg);
int dmu_objset_clone(const char *name, const char *origin);
int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer,
    struct nvlist *errlist);
int dmu_objset_snapshot_one(const char *fsname, const char *snapname);
int dmu_objset_snapshot_tmp(const char *, const char *, int);
int dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags);
void dmu_objset_byteswap(void *buf, size_t size);
int dsl_dataset_rename_snapshot(const char *fsname,
    const char *oldsnapname, const char *newsnapname, boolean_t recursive);
int dmu_objset_remap_indirects(const char *fsname);

typedef struct dmu_buf {
	uint64_t db_object;		/* object that this buffer is part of */
	uint64_t db_offset;		/* byte offset in this object */
	uint64_t db_size;		/* size of buffer in bytes */
	void *db_data;			/* data in buffer */
} dmu_buf_t;

/*
 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
 */
#define	DMU_POOL_DIRECTORY_OBJECT	1
#define	DMU_POOL_CONFIG			"config"
#define	DMU_POOL_FEATURES_FOR_WRITE	"features_for_write"
#define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
#define	DMU_POOL_FEATURE_DESCRIPTIONS	"feature_descriptions"
#define	DMU_POOL_FEATURE_ENABLED_TXG	"feature_enabled_txg"
#define	DMU_POOL_ROOT_DATASET		"root_dataset"
#define	DMU_POOL_SYNC_BPOBJ		"sync_bplist"
#define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
#define	DMU_POOL_ERRLOG_LAST		"errlog_last"
#define	DMU_POOL_SPARES			"spares"
#define	DMU_POOL_DEFLATE		"deflate"
#define	DMU_POOL_HISTORY		"history"
#define	DMU_POOL_PROPS			"pool_props"
#define	DMU_POOL_L2CACHE		"l2cache"
#define	DMU_POOL_TMP_USERREFS		"tmp_userrefs"
#define	DMU_POOL_DDT			"DDT-%s-%s-%s"
#define	DMU_POOL_DDT_STATS		"DDT-statistics"
#define	DMU_POOL_CREATION_VERSION	"creation_version"
#define	DMU_POOL_SCAN			"scan"
#define	DMU_POOL_FREE_BPOBJ		"free_bpobj"
#define	DMU_POOL_BPTREE_OBJ		"bptree_obj"
#define	DMU_POOL_EMPTY_BPOBJ		"empty_bpobj"
#define	DMU_POOL_CHECKSUM_SALT		"org.illumos:checksum_salt"
#define	DMU_POOL_VDEV_ZAP_MAP		"com.delphix:vdev_zap_map"
#define	DMU_POOL_REMOVING		"com.delphix:removing"
#define	DMU_POOL_OBSOLETE_BPOBJ		"com.delphix:obsolete_bpobj"
#define	DMU_POOL_CONDENSING_INDIRECT	"com.delphix:condensing_indirect"
#define	DMU_POOL_ZPOOL_CHECKPOINT	"com.delphix:zpool_checkpoint"

/*
 * Allocate an object from this objset. The range of object numbers
 * available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode.
 *
 * The transaction must be assigned to a txg. The newly allocated
 * object will be "held" in the transaction (ie. you can modify the
 * newly allocated object in this transaction).
 *
 * dmu_object_alloc() chooses an unused object and returns its number.
 *
 * dmu_object_claim() allocates a specific object number. If that
 * number is already allocated, it fails and returns EEXIST.
 *
 * Return 0 on success, or ENOSPC or EEXIST as specified above.
 */
uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
uint64_t dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
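
/*
 * Example: allocating a new object inside an assigned transaction.
 * This is an illustrative sketch, not part of the interface; error
 * handling is abbreviated and "os" is assumed to be an objset handle
 * the caller already owns.
 *
 *	int error;
 *	uint64_t object;
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	object = dmu_object_alloc(os, DMU_OTN_UINT64_DATA, 0,
 *	    DMU_OT_NONE, 0, tx);
 *	dmu_tx_commit(tx);
 */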

/*
 * Free an object from this objset.
 *
 * The object's data will be freed as well (ie. you don't need to call
 * dmu_free(object, 0, -1, tx)).
 *
 * The object need not be held in the transaction.
 *
 * If there are any holds on this object's buffers (via dmu_buf_hold()),
 * or tx holds on the object (via dmu_tx_hold_object()), you can not
 * free it; it fails and returns EBUSY.
 *
 * If the object is not allocated, it fails and returns ENOENT.
 *
 * Return 0 on success, or EBUSY or ENOENT as specified above.
 */
int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);

/*
 * Find the next allocated or free object.
 *
 * The objectp parameter is in-out. It will be updated to be the next
 * object which is allocated. Ignore objects which have not been
 * modified since txg.
 *
 * XXX Can only be called on an objset with no dirty data.
 *
 * Returns 0 on success, or ENOENT if there are no more objects.
 */
int dmu_object_next(objset_t *os, uint64_t *objectp,
    boolean_t hole, uint64_t txg);

/*
 * Set the data blocksize for an object.
 *
 * The object cannot have any blocks allocated beyond the first. If
 * the first block is allocated already, the new size must be greater
 * than the current block size. If these conditions are not met,
 * ENOTSUP will be returned.
 *
 * Returns 0 on success, or EBUSY if there are any holds on the object
 * contents, or ENOTSUP as described above.
 */
int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size,
    int ibs, dmu_tx_t *tx);

/*
 * Set the checksum property on a dnode. The new checksum algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx);

/*
 * Set the compress property on a dnode. The new compression algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx);

int dmu_object_remap_indirects(objset_t *os, uint64_t object, uint64_t txg);

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx);

/*
 * Decide how to write a block: checksum, compression, number of copies, etc.
 */
#define	WP_NOFILL	0x1
#define	WP_DMU_SYNC	0x2
#define	WP_SPILL	0x4

void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
    struct zio_prop *zp);
/*
 * The bonus data is accessed more or less like a regular buffer.
 * You must call dmu_bonus_hold() to get the buffer, which will give you a
 * dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus
 * data. As with any normal buffer, you must call dmu_buf_will_dirty()
 * before modifying it, and the object must be held in an assigned
 * transaction before calling dmu_buf_will_dirty. You may use
 * dmu_buf_set_user() on the bonus buffer as well. You must release your
 * hold with dmu_buf_rele().
 *
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **);
int dmu_bonus_max(void);
int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
dmu_object_type_t dmu_get_bonustype(dmu_buf_t *);
int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);

/*
 * Special spill buffer support used by the "SA" framework
 */

int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags,
    void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);

/*
 * Obtain the DMU buffer from the specified object which contains the
 * specified offset. dmu_buf_hold() puts a "hold" on the buffer, so
 * that it will remain in memory. You must release the hold with
 * dmu_buf_rele(). You mustn't access the dmu_buf_t after releasing your
 * hold. You must have a hold on any dmu_buf_t* you pass to the DMU.
 *
 * You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill
 * on the returned buffer before reading or writing the buffer's
 * db_data. The comments for those routines describe what particular
 * operations are valid after calling them.
 *
 * The object number must be a valid, allocated object number.
 */
int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **, int flags);
int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags);

/*
 * Add a reference to a dmu buffer that has already been held via
 * dmu_buf_hold() in the current context.
 */
void dmu_buf_add_ref(dmu_buf_t *db, void *tag);

/*
 * Attempt to add a reference to a dmu buffer that is in an unknown state,
 * using a pointer that may have been invalidated by eviction processing.
 * The request will succeed if the passed in dbuf still represents the
 * same os/object/blkid, is ineligible for eviction, and has at least
 * one hold by a user other than the syncer.
 */
boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object,
    uint64_t blkid, void *tag);

void dmu_buf_rele(dmu_buf_t *db, void *tag);
uint64_t dmu_buf_refcount(dmu_buf_t *db);
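
/*
 * Example: updating one word of a block through a held buffer. This is an
 * illustrative sketch only; it assumes "tx" was created with a
 * dmu_tx_hold_write() covering this offset and has already been assigned,
 * and that FTAG is the conventional hold tag.
 *
 *	dmu_buf_t *db;
 *
 *	if (dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH) == 0) {
 *		dmu_buf_will_dirty(db, tx);
 *		((uint64_t *)db->db_data)[0] = new_value;
 *		dmu_buf_rele(db, FTAG);
 *	}
 */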

/*
 * dmu_buf_hold_array holds the DMU buffers which contain all bytes in a
 * range of an object. A pointer to an array of dmu_buf_t*'s is
 * returned (in *dbpp).
 *
 * dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and
 * frees the array. The hold on the array of buffers MUST be released
 * with dmu_buf_rele_array. You can NOT release the hold on each buffer
 * individually with dmu_buf_rele.
 */
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, boolean_t read, void *tag,
    int *numbufsp, dmu_buf_t ***dbpp);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag);

typedef void dmu_buf_evict_func_t(void *user_ptr);

/*
 * A DMU buffer user object may be associated with a dbuf for the
 * duration of its lifetime. This allows the user of a dbuf (client)
 * to attach private data to a dbuf (e.g. in-core only data such as a
 * dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified
 * when that dbuf has been evicted. Clients typically respond to the
 * eviction notification by freeing their private data, thus ensuring
 * the same lifetime for both dbuf and private data.
 *
 * The mapping from a dmu_buf_user_t to any client private data is the
 * client's responsibility. All current consumers of the API with private
 * data embed a dmu_buf_user_t as the first member of the structure for
 * their private data. This allows conversions between the two types
 * with a simple cast. Since the DMU buf user API never needs access
 * to the private data, other strategies can be employed if necessary
 * or convenient for the client (e.g. using container_of() to do the
 * conversion for private data that cannot have the dmu_buf_user_t as
 * its first member).
 *
 * Eviction callbacks are executed without the dbuf mutex held or any
 * other type of mechanism to guarantee that the dbuf is still available.
 * For this reason, users must assume the dbuf has already been freed
 * and not reference the dbuf from the callback context.
 *
 * Users requesting "immediate eviction" are notified as soon as the dbuf
 * is only referenced by dirty records (dirties == holds). Otherwise the
 * notification occurs after eviction processing for the dbuf begins.
 */
typedef struct dmu_buf_user {
	/*
	 * Asynchronous user eviction callback state.
	 */
	taskq_ent_t	dbu_tqent;

	/*
	 * This instance's eviction function pointers.
	 *
	 * dbu_evict_func_sync is called synchronously and then
	 * dbu_evict_func_async is executed asynchronously on a taskq.
	 */
	dmu_buf_evict_func_t *dbu_evict_func_sync;
	dmu_buf_evict_func_t *dbu_evict_func_async;
#ifdef ZFS_DEBUG
	/*
	 * Pointer to user's dbuf pointer. NULL for clients that do
	 * not associate a dbuf with their user data.
	 *
	 * The dbuf pointer is cleared upon eviction so as to catch
	 * use-after-evict bugs in clients.
	 */
	dmu_buf_t **dbu_clear_on_evict_dbufp;
#endif
} dmu_buf_user_t;
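
/*
 * Example: a client structure that embeds dmu_buf_user_t as its first
 * member, as described above. This is an illustrative sketch; "my_state_t"
 * and its eviction callback are hypothetical, not part of this interface.
 *
 *	typedef struct my_state {
 *		dmu_buf_user_t	ms_dbu;		(must be the first member)
 *		uint64_t	ms_cached_value;
 *	} my_state_t;
 *
 *	static void
 *	my_state_evict_sync(void *dbu)
 *	{
 *		my_state_t *ms = dbu;		(cast works: first member)
 *
 *		kmem_free(ms, sizeof (my_state_t));
 *	}
 *
 * The instance would be set up with dmu_buf_init_user(&ms->ms_dbu,
 * my_state_evict_sync, NULL, NULL) and attached to a held dbuf with
 * dmu_buf_set_user(db, &ms->ms_dbu).
 */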

/*
 * Initialize the given dmu_buf_user_t instance with the eviction function
 * evict_func, to be called when the user is evicted.
 *
 * NOTE: This function should only be called once on a given dmu_buf_user_t.
 * To allow enforcement of this, dbu must already be zeroed on entry.
 */
/*ARGSUSED*/
static inline void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async, dmu_buf_t **clear_on_evict_dbufp)
{
	ASSERT(dbu->dbu_evict_func_sync == NULL);
	ASSERT(dbu->dbu_evict_func_async == NULL);

	/* must have at least one evict func */
	IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
	dbu->dbu_evict_func_sync = evict_func_sync;
	dbu->dbu_evict_func_async = evict_func_async;
#ifdef ZFS_DEBUG
	dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp;
#endif
}

/*
 * Attach user data to a dbuf and mark it for normal (when the dbuf's
 * data is cleared or its reference count goes to zero) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Attach user data to a dbuf and mark it for immediate (when its dirty and
 * reference counts are equal) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Replace the current user of a dbuf.
 *
 * If given the current user of a dbuf, replaces the dbuf's user with
 * "new_user" and returns the user data pointer that was replaced.
 * Otherwise returns the current, and unmodified, dbuf user pointer.
 */
void *dmu_buf_replace_user(dmu_buf_t *db,
    dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);

/*
 * Remove the specified user data for a DMU buffer.
 *
 * Returns the user that was removed on success, or the current user if
 * another user currently owns the buffer.
 */
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Returns the user data (dmu_buf_user_t *) associated with this dbuf.
 */
void *dmu_buf_get_user(dmu_buf_t *db);

objset_t *dmu_buf_get_objset(dmu_buf_t *db);
dnode_t *dmu_buf_dnode_enter(dmu_buf_t *db);
void dmu_buf_dnode_exit(dmu_buf_t *db);

/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);

/*
 * Returns the blkptr associated with this dbuf, or NULL if not set.
 */
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);

/*
 * Indicate that you are going to modify the buffer's data (db_data).
 *
 * The transaction (tx) must be assigned to a txg (ie. you've called
 * dmu_tx_assign()). The buffer's object must be held in the tx
 * (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
 */
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);

/*
 * You must create a transaction, then hold the objects which you will
 * (or might) modify as part of this transaction. Then you must assign
 * the transaction to a transaction group. Once the transaction has
 * been assigned, you can modify buffers which belong to held objects as
 * part of this transaction. You can't modify buffers before the
 * transaction has been assigned; you can't modify buffers which don't
 * belong to objects which this transaction holds; you can't hold
 * objects once the transaction has been assigned. You may hold an
 * object which you are going to free (with dmu_object_free()), but you
 * don't have to.
 *
 * You can abort the transaction before it has been assigned.
 *
 * Note that you may hold buffers (with dmu_buf_hold) at any time,
 * regardless of transaction state.
 */

#define	DMU_NEW_OBJECT	(-1ULL)
#define	DMU_OBJECT_END	(-1ULL)

dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
    int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
    uint64_t len);
void dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
    uint64_t len);
void dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add,
    const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
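
/*
 * Example of the transaction life cycle described above, using the
 * common "retry on ERESTART" idiom. This is an illustrative sketch;
 * error handling is abbreviated and the held object/range is only an
 * example. (A retry may also OR in TXG_NOTHROTTLE to bypass the write
 * throttle once dmu_tx_wait() has been called.)
 *
 *	top:
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, len);
 *		error = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (error != 0) {
 *			if (error == ERESTART) {
 *				dmu_tx_wait(tx);
 *				dmu_tx_abort(tx);
 *				goto top;
 *			}
 *			dmu_tx_abort(tx);
 *			return (error);
 *		}
 *		... modify buffers belonging to objects held by this tx ...
 *		dmu_tx_commit(tx);
 */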

/*
 * To register a commit callback, dmu_tx_callback_register() must be called.
 *
 * dcb_data is a pointer to caller private data that is passed on as a
 * callback parameter. The caller is responsible for properly allocating and
 * freeing it.
 *
 * When registering a callback, the transaction must be already created, but
 * it cannot be committed or aborted. It can be assigned to a txg or not.
 *
 * The callback will be called after the transaction has been safely written
 * to stable storage and will also be called if the dmu_tx is aborted.
 * If there is any error which prevents the transaction from being committed to
 * disk, the callback will be called with a value of error != 0.
 */
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);

void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
    void *dcb_data);
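
/*
 * Example: registering a commit callback. This is an illustrative sketch;
 * "my_cb_state_t" and my_commit_cb() are hypothetical, and the callback
 * argument must remain valid until the callback fires.
 *
 *	static void
 *	my_commit_cb(void *dcb_data, int error)
 *	{
 *		my_cb_state_t *cbs = dcb_data;
 *
 *		if (error != 0)
 *			cmn_err(CE_WARN, "tx not committed (%d)", error);
 *		kmem_free(cbs, sizeof (my_cb_state_t));
 *	}
 *
 * The callback is registered any time after dmu_tx_create() and before
 * dmu_tx_commit() or dmu_tx_abort():
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, cbs);
 */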

/*
 * Free up the data blocks for a defined range of a file. If size is
 * -1, the range from offset to end-of-file is freed.
 */
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size);
int dmu_free_long_object(objset_t *os, uint64_t object);

/*
 * Convenience functions.
 *
 * Canfail routines will return 0 on success, or an errno if there is a
 * nonrecoverable I/O error.
 */
#define	DMU_READ_PREFETCH	0 /* prefetch */
#define	DMU_READ_NO_PREFETCH	1 /* don't prefetch */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags);
int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx);
void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, struct page *pp, dmu_tx_t *tx);
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
void dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf,
    dmu_tx_t *tx);
int dmu_xuio_init(struct xuio *uio, int niov);
void dmu_xuio_fini(struct xuio *uio);
int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
    size_t n);
int dmu_xuio_cnt(struct xuio *uio);
struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
void dmu_xuio_clear(struct xuio *uio, int i);
void xuio_stat_wbuf_copied(void);
void xuio_stat_wbuf_nocopy(void);

extern boolean_t zfs_prefetch_disable;
extern int zfs_max_recordsize;

/*
 * Asynchronously try to read in the data.
 */
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, enum zio_priority pri);

typedef struct dmu_object_info {
	/* All sizes are in bytes unless otherwise indicated. */
	uint32_t doi_data_block_size;
	uint32_t doi_metadata_block_size;
	dmu_object_type_t doi_type;
	dmu_object_type_t doi_bonus_type;
	uint64_t doi_bonus_size;
	uint8_t doi_indirection;		/* 2 = dnode->indirect->data */
	uint8_t doi_checksum;
	uint8_t doi_compress;
	uint8_t doi_nblkptr;
	uint8_t doi_pad[4];
	uint64_t doi_physical_blocks_512;	/* data + metadata, 512b blks */
	uint64_t doi_max_offset;
	uint64_t doi_fill_count;		/* number of non-empty blocks */
} dmu_object_info_t;

typedef void arc_byteswap_func_t(void *buf, size_t size);

typedef struct dmu_object_type_info {
	dmu_object_byteswap_t	ot_byteswap;
	boolean_t		ot_metadata;
	boolean_t		ot_dbuf_metadata_cache;
	char			*ot_name;
} dmu_object_type_info_t;

typedef struct dmu_object_byteswap_info {
	arc_byteswap_func_t	*ob_func;
	char			*ob_name;
} dmu_object_byteswap_info_t;

extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];
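
/*
 * Example: byteswapping a block of a given object type by dispatching
 * through the tables above. This is an illustrative sketch; "ot", "buf"
 * and "size" are assumed to describe a single block of that type.
 *
 *	if (DMU_OT_IS_VALID(ot)) {
 *		dmu_object_byteswap_t bswap = DMU_OT_BYTESWAP(ot);
 *		dmu_ot_byteswap[bswap].ob_func(buf, size);
 *	}
 */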

/*
 * Get information on a DMU object.
 *
 * Return 0 on success or ENOENT if object is not allocated.
 *
 * If doi is NULL, just indicates whether the object exists.
 */
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
 * Like dmu_object_info_from_db, but faster still when you only care about
 * the size. This is specifically optimized for zfs_getattr().
 */
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
    u_longlong_t *nblk512);

typedef struct dmu_objset_stats {
	uint64_t dds_num_clones;	/* number of clones of this */
	uint64_t dds_creation_txg;
	uint64_t dds_guid;
	dmu_objset_type_t dds_type;
	uint8_t dds_is_snapshot;
	uint8_t dds_inconsistent;
	char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;

/*
 * Get stats on a dataset.
 */
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);

/*
 * Add entries to the nvlist for all the objset's properties. See
 * zfs_prop_table[] and zfs(1m) for details on the properties.
 */
void dmu_objset_stats(objset_t *os, struct nvlist *nv);

/*
 * Get the space usage statistics for statvfs().
 *
 * refdbytes is the amount of space "referenced" by this objset.
 * availbytes is the amount of space available to this objset, taking
 * into account quotas & reservations, assuming that no other objsets
 * use the space first. These values correspond to the 'referenced' and
 * 'available' properties, described in the zfs(1m) manpage.
 *
 * usedobjs and availobjs are the number of objects currently allocated,
 * and available.
 */
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp);
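
/*
 * Example: filling in the block and file counts of a statvfs-style
 * structure, in the manner of zfs_statvfs(). This is an illustrative
 * sketch; "st" is a hypothetical statvfs64 structure and the 512-byte
 * block unit (SPA_MINBLOCKSHIFT) is only one possible choice.
 *
 *	uint64_t refdbytes, availbytes, usedobjs, availobjs;
 *
 *	dmu_objset_space(os, &refdbytes, &availbytes, &usedobjs, &availobjs);
 *	st->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
 *	st->f_bfree = availbytes >> SPA_MINBLOCKSHIFT;
 *	st->f_files = usedobjs + availobjs;
 *	st->f_ffree = availobjs;
 */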

/*
 * The fsid_guid is a 56-bit ID that can change to avoid collisions.
 * (Contrast with the ds_guid which is a 64-bit ID that will never
 * change, so there is a small probability that it will collide.)
 */
uint64_t dmu_objset_fsid_guid(objset_t *os);

/*
 * Get the [cm]time for an objset's snapshot dir
 */
timestruc_t dmu_objset_snap_cmtime(objset_t *os);

int dmu_objset_is_snapshot(objset_t *os);

extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_realname(objset_t *os, char *name, char *real,
    int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp);

typedef int objset_used_cb_t(dmu_object_type_t bonustype,
    void *bonus, uint64_t *userp, uint64_t *groupp);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
    objset_used_cb_t *cb);
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);

/*
 * Return the txg number for the given assigned transaction.
 */
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);

/*
 * Synchronous write.
 * If a parent zio is provided this function initiates a write on the
 * provided buffer as a child of the parent zio.
 * In the absence of a parent zio, the write is completed synchronously.
 * At write completion, blk is filled with the bp of the written block.
 * Note that while the data covered by this function will be on stable
 * storage when the write completes this new data does not become a
 * permanent part of the file until the associated transaction commits.
 */

/*
 * {zfs,zvol,ztest}_get_done() args
 */
typedef struct zgd {
	struct lwb	*zgd_lwb;
	struct blkptr	*zgd_bp;
	dmu_buf_t	*zgd_db;
	struct rl	*zgd_rl;
	void		*zgd_private;
} zgd_t;

typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);

/*
 * Find the next hole or data block in a file starting at *off.
 * Return found offset in *off. Return ESRCH for end of file.
 */
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
    uint64_t *off);
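
/*
 * Example: finding the first hole at or after a given offset, in the
 * style of SEEK_HOLE. This is an illustrative sketch; "start" is the
 * caller's starting offset.
 *
 *	uint64_t off = start;
 *	int error = dmu_offset_next(os, object, B_TRUE, &off);
 *
 * On return, error == 0 means *off is the offset of the next hole;
 * error == ESRCH means no hole was found before end-of-file.
 */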

/*
 * Check if a DMU object has any dirty blocks. If so, sync out
 * all pending transaction groups. Otherwise, this function
 * does not alter DMU state. This could be improved to only sync
 * out the necessary transaction groups for this particular
 * object.
 */
int dmu_object_wait_synced(objset_t *os, uint64_t object);

/*
 * Initial setup and final teardown.
 */
extern void dmu_init(void);
extern void dmu_fini(void);

typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
    uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
    dmu_traverse_cb_t cb, void *arg);

int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
    struct vnode *vp, offset_t *offp);

/* CRC64 table */
#define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];

extern int zfs_mdcomp_disable;

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_DMU_H */