/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>

int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;		/* 1/8th of physical memory */
int zfs_txg_synctime_ms = 1000;		/* target millisecs to sync a txg */

uint64_t zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
uint64_t zfs_write_limit_max = 0;		/* max data payload per txg */
uint64_t zfs_write_limit_inflated = 0;
uint64_t zfs_write_limit_override = 0;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;
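/*
 * Worked example for the tunables above (illustrative figures, not a
 * guarantee): with the default zfs_write_limit_shift of 3 on a machine
 * with 8GB of physical memory, dsl_pool_sync() computes
 *
 *	zfs_write_limit_max = ptob(physmem) >> 3;	(= 1GB)
 *
 * and derives zfs_write_limit_inflated from it via spa_get_asize(), to
 * account for replication, never going below zfs_write_limit_min (32MB).
 * Setting zfs_write_limit_override to a nonzero byte count bypasses the
 * per-pool limit entirely, and zfs_no_write_throttle disables the
 * throttle in dsl_pool_tempreserve_space().
 */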
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_open_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_group_t, dstg_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_close(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rw_exit(&dp->dp_config_rwlock);
	return (err);
}
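/*
 * A sketch of the intended calling sequence (the real callers live in
 * spa.c; error handling elided for brevity):
 *
 *	dsl_pool_t *dp;
 *
 *	error = dsl_pool_init(spa, txg, &dp);	// open the MOS from rootbp
 *	...
 *	error = dsl_pool_open(dp);		// look up dirs, scan state
 *	...
 *	dsl_pool_close(dp);			// drop everything taken above
 */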
void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_close(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_close(dp->dp_free_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_close(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	rw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_vnrele_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT3U(err, ==, 0);

	/* Initialize scan structures */
	VERIFY3U(0, ==, dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY(0 == dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY(0 == dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_pool_t *dp = dmu_objset_pool(dl->dl_os);
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	dsl_deadlist_insert(dl, bp, tx);
	rw_exit(&dp->dp_config_rwlock);
	return (0);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	/*
	 * We need to copy dp_space_towrite() before doing
	 * dsl_sync_task_group_sync(), because
	 * dsl_dataset_snapshot_reserve_space() will increase
	 * dp_space_towrite but not actually write anything.
	 */
	data_written = dp->dp_space_towrite[txg & TXG_MASK];

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	start = gethrtime();

	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them.  However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);
	err = zio_wait(zio);

	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.
	 */
	for (ds = list_head(&synced_datasets); ds;
	    ds = list_next(&synced_datasets, ds))
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates.  This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
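	/*
	 * Note: it is the dmu_objset_do_userquota_updates() calls above
	 * that re-dirtied these datasets, which is why the dirty list is
	 * non-empty again on this second pass.
	 */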
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	err = zio_wait(zio);

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk deadlist
	 *  - clean up zil records
	 *  - release hold from dsl_dataset_dirty()
	 */
	while (ds = list_remove_head(&synced_datasets)) {
		objset_t *os = ds->ds_objset;
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}

	start = gethrtime();
	while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir).  We can't modify the MOS while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	start = gethrtime();
	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mos, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	DTRACE_PROBE(pool_sync__3task);
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_group_t *dstg;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
		while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg))
			dsl_sync_task_group_sync(dstg, tx);
	}

	dmu_tx_commit(tx);

	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
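	/*
	 * Worked example (illustrative figures): with 8GB of physical
	 * memory and the default zfs_write_limit_shift of 3,
	 * zfs_write_limit_max becomes ptob(physmem) >> 3 = 1GB of data
	 * payload per txg; zfs_write_limit_inflated is then the
	 * worst-case on-disk size of that payload as estimated by
	 * spa_get_asize(), but never less than zfs_write_limit_min.
	 */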
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 *	thru = 3/4 old_thru + 1/4 new_thru
	 *
	 * Note: write_time is in nanosecs, so write_time/MICROSEC
	 * yields millisecs
	 */
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 && write_time > MICROSEC) {
		uint64_t throughput = data_written / (write_time / MICROSEC);

		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime_ms));
	}
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;
	dsl_dataset_t *ds;

	while (zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg)) {
		ds = dmu_objset_ds(zilog->zl_os);
		zil_clean(zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based on worst-case estimates.
	 */
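	/*
	 * Worked example (illustrative numbers): with write_limit = 1GB,
	 * dp_space_towrite = 600MB, and dp_tempreserved = 500MB,
	 * reserved = 600MB + 500MB/2 = 850MB, so this request is
	 * admitted; another 400MB of tempreserve would push the total
	 * past the limit and return ERESTART, making the caller wait
	 * for the next txg.
	 */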
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit)
			return (ERESTART);
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay
	 * the caller 1 clock tick.  This will slow down the "fill"
	 * rate until the sync process can catch up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3)))
		txg_delay(dp, tx->tx_txg, 1);

	return (0);
}

void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}

void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}

void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}

/* ARGSUSED */
static int
upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
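		/* A birth txg of 0 in the root bp means nothing was ever written. */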
		ASSERT(prev->ds_phys->ds_bp.blk_birth == 0);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object);
	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY(0 == zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (ds->ds_dir->dd_phys->dd_origin_obj) {
		dsl_dataset_t *origin;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));

		if (origin->ds_dir->dd_phys->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}

		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));

		dsl_dataset_rele(origin, FTAG);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support.  So call dmu_object_alloc() directly.
	 */
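	/*
	 * (bpobj_alloc() sizes the bonus buffer based on spa_version(),
	 * so at the old version it would allocate a pre-subobj bpobj;
	 * asking dmu_object_alloc() for sizeof (bpobj_phys_t) gets the
	 * full, subobj-capable layout.)
	 */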
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY3U(0, ==, zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
	    dp->dp_meta_objset, obj));

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);

	/* create the origin dir, ds, & snap-ds */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
	rw_exit(&dp->dp_config_rwlock);
}

taskq_t *
dsl_pool_vnrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_vnrele_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		uint64_t dsobj;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		dsobj = strtonum(za.za_name, NULL);
		(void) dsl_dataset_user_release_tmp(dp, dsobj, htag, B_FALSE);
	}
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t *now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (ENOENT);
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}
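/*
 * Entries in the temporary-hold zap are keyed "<dsobj-in-hex>-<tag>";
 * for example, a hold tagged "myhold" (an illustrative tag name) on
 * dataset object 0x1a would be stored under the name "1a-myhold".
 * This is why dsl_pool_clean_tmp_userrefs() above splits each name at
 * the first '-' and parses the prefix with strtonum().
 */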
/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t *now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL,
	    tx, B_FALSE));
}