Searched defs:txg (Results 1 - 25 of 43) sorted by relevance

/illumos-gate/usr/src/uts/common/fs/zfs/
uberblock.c 48 uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg, uint64_t mmp_delay) argument
50 ASSERT(ub->ub_txg < txg);
57 ub->ub_txg = txg;
73 return (ub->ub_rootbp.blk_birth == txg);
vdev_indirect_births.c 127 uint64_t max_offset, uint64_t txg, dmu_tx_t *tx)
141 vibe.vibe_phys_birth_txg = txg;
170 * Return the txg in which the given range was copied (i.e. its physical
171 * birth txg). The specified offset+asize must be contiguously mapped
180 * specified txg.
126 vdev_indirect_births_add_entry(vdev_indirect_births_t *vib, uint64_t max_offset, uint64_t txg, dmu_tx_t *tx) argument
spa_errlog.c 342 spa_errlog_sync(spa_t *spa, uint64_t txg) argument
367 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dmu_object.c 100 * from the beginning at most once per txg. If we
112 * os_scan_dnodes is set during txg sync if enough
347 * after the specified txg.
350 dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg) argument
401 (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);
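
dmu_object_next() above (line 350) underpins object iteration. A minimal usage sketch, assuming an open objset_t *os and an illustrative min_txg cutoff (both placeholders, not taken from the results above):

    /*
     * Visit every non-hole object allocated after min_txg.
     * dmu_object_next() advances *objectp to the next match and
     * returns ESRCH once the objset is exhausted.
     */
    uint64_t object = 0;
    int error;
    while ((error = dmu_object_next(os, &object, B_FALSE, min_txg)) == 0) {
            /* ... examine the object here ... */
    }
    /* error is ESRCH at end-of-objset, or a genuine lookup error */
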
spa_config.c 363 spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats) argument
382 * If txg is -1, report the current value of spa->spa_config_txg.
384 if (txg == -1ULL)
385 txg = spa->spa_config_txg;
409 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
520 uint64_t txg; local
526 txg = spa_last_synced_txg(spa) + 1;
553 vdev_expand(tvd, txg);
561 txg_wait_synced(spa->spa_dsl_pool, txg);
zfeature.c 499 * OUT txg argument.
501 * Returns B_TRUE if the feature is enabled, in which case txg will be filled
506 spa_feature_enabled_txg(spa_t *spa, spa_feature_t fid, uint64_t *txg) argument
514 err = feature_get_enabled_txg(spa, &spa_feature_table[fid], txg);
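
Per the comment above, spa_feature_enabled_txg() is both a predicate and an out-parameter query. A minimal sketch; the feature id is only an example:

    uint64_t enabled_txg;
    if (spa_feature_enabled_txg(spa, SPA_FEATURE_EXTENSIBLE_DATASET,
        &enabled_txg)) {
            /* enabled; enabled_txg holds the txg it was enabled in */
    } else {
            /* not enabled; enabled_txg was left untouched */
    }
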
dnode_sync.c 160 uint64_t txg = tx->tx_txg; local
193 while (dr && dr->dr_txg > txg)
195 ASSERT(dr == NULL || dr->dr_txg == txg);
211 * future txg.
234 * We don't usually free the indirect blocks here. If in one txg we have a
508 uint64_t txg = dr->dr_txg; local
528 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
vdev_initialize.c 28 #include <sys/txg.h>
66 uint64_t txg = dmu_tx_get_txg(tx); local
73 uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
74 vd->vdev_initialize_offset[txg & TXG_MASK] = 0;
201 uint64_t txg = dmu_tx_get_txg(tx); local
206 if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
210 /* This is the first write of this txg. */
232 vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
233 zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
526 * txg sinc
[all...]
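
The txg & TXG_MASK pattern above recurs throughout these results. A short sketch of the idea, assuming the usual definitions from sys/txg.h (TXG_SIZE is 4 and TXG_MASK is TXG_SIZE - 1):

    /*
     * Per-txg state lives in fixed-size ring arrays. At most TXG_SIZE
     * txgs can be in flight at once, so the low bits of the txg number
     * select a slot that cannot collide with another active txg.
     */
    uint64_t slot = txg & TXG_MASK;                  /* == txg % TXG_SIZE */
    vd->vdev_initialize_offset[slot] = start + size; /* record progress */
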
zio_checksum.c 241 * Set the external verifier for a gang block based on <vdev, offset, txg>,
248 uint64_t txg = BP_PHYSICAL_BIRTH(bp); local
252 ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
257 * The vdev is implicit, and the txg is unknowable at pool open time --
txg.c 42 * these transaction groups. Each successive transaction group (txg) is
45 * there may be an active txg associated with each state; each active txg may
47 * be up to three active txgs, and there is always a txg in the open state
50 * accepted into the txg in the open state, and are completed while the txg is
56 * When a new txg becomes active, it first enters the open state. New
58 * currently open txg. There is always a txg in the open state so that ZFS can
59 * accept new changes (though the txg ma
118 txg_init(dsl_pool_t *dp, uint64_t txg) argument
297 uint64_t txg; local
354 txg_quiesce(dsl_pool_t *dp, uint64_t txg) argument
409 txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) argument
486 uint64_t txg; local
565 uint64_t txg; local
610 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution) argument
636 txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig) argument
675 txg_wait_synced(dsl_pool_t *dp, uint64_t txg) argument
685 txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg) argument
695 txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce) argument
760 txg_verify(spa_t *spa, uint64_t txg) argument
799 txg_list_empty(txg_list_t *tl, uint64_t txg) argument
828 txg_list_add(txg_list_t *tl, void *p, uint64_t txg) argument
853 txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg) argument
881 txg_list_remove(txg_list_t *tl, uint64_t txg) argument
906 txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg) argument
930 txg_list_member(txg_list_t *tl, void *p, uint64_t txg) argument
943 txg_list_head(txg_list_t *tl, uint64_t txg) argument
953 txg_list_next(txg_list_t *tl, void *p, uint64_t txg) argument
[all...]
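
The txg.c block comment (lines 42-59 above) describes the open -> quiescing -> syncing pipeline. For callers, the practical consequence is the wait idiom below; a minimal sketch built from functions listed in this file:

    /*
     * Make a change durable: note the txg the tx was assigned to,
     * commit, then block until that txg has finished syncing.
     */
    uint64_t txg = dmu_tx_get_txg(tx);
    dmu_tx_commit(tx);
    txg_wait_synced(dp, txg);   /* returns once txg is on stable storage */
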
dmu_tx.c 71 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg) argument
75 txg_verify(dp->dp_spa, txg);
77 tx->tx_txg = txg;
171 * after the transaction has been assigned. This reduces the lock (and txg)
989 * If TXG_WAIT is set and the currently open txg is full, this function
990 * will wait until there's a new txg. This should be used when no locks
995 * txg without blocking, this function will return immediately with
1078 * A dnode is assigned to the quiescing txg. Wait for its
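
The TXG_WAIT comment above (lines 989-995) pairs with the standard consumer pattern; a sketch, with the held object, offset, and size purely illustrative:

    dmu_tx_t *tx = dmu_tx_create(os);
    dmu_tx_hold_write(tx, object, offset, size);    /* declare intent */
    int err = dmu_tx_assign(tx, TXG_WAIT);  /* may sleep for a new open txg */
    if (err != 0) {
            dmu_tx_abort(tx);   /* never assigned: abort, don't commit */
            return (err);
    }
    /* ... perform the declared writes under this tx ... */
    dmu_tx_commit(tx);
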
vdev_mirror.c 493 uint64_t txg = zio->io_txg; local
496 ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);
514 if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
544 * Every device is either missing or has this txg in its DTL.
dmu_traverse.c 720 uint64_t txg = txg_start; local
730 if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
731 txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
732 err = traverse_dataset(ds, txg, flags, func, arg);
vdev_trim.c 30 #include <sys/txg.h>
201 uint64_t txg = dmu_tx_get_txg(tx); local
208 uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
209 vd->vdev_trim_offset[txg & TXG_MASK] = 0;
479 uint64_t txg = dmu_tx_get_txg(tx); local
485 vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
489 /* This is the first write of this txg. */
514 vd->vdev_trim_offset[txg & TXG_MASK] = start + size;
516 zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
891 * Drop the vdev_trim_lock while we sync out the txg sinc
[all...]
dsl_pool.c 79 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
82 * relevant, the per-txg value is useful for debugging. The tunable
87 * ensure that there is a txg syncing (see the comment in txg.c for a full
108 * zfs_dirty_data_max), push out a txg. This should be less than
186 dsl_pool_open_impl(spa_t *spa, uint64_t txg) argument
195 txg_init(dp, txg);
229 dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) argument
232 dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
442 uint64_t txg)
441 dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp, uint64_t txg) argument
573 dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg) argument
594 dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) argument
769 dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg) argument
892 dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg) argument
[all...]
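
The dirty-data comment above (lines 79-108) reduces to a threshold check. A sketch of the assumed form, not the verbatim illumos logic:

    /*
     * Once enough dirty data accumulates poolwide, kick the sync
     * thread so a txg begins syncing well before zfs_dirty_data_max
     * would force incoming writers to throttle.
     */
    if (dp->dp_dirty_total > zfs_dirty_data_sync)
            txg_kick(dp);
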
spa_log_spacemap.c 59 * pairs are of the form <key: txg, value: log space map object for that txg>.
213 * flushed per txg) and that's why making it a percentage in terms of the
220 * linearly from txg to txg (e.g. the oldest log should have the most
402 * for a txg as an argument so we can locate the appropriate summary entry for
406 spa_log_summary_decrement_mscount(spa_t *spa, uint64_t txg) argument
418 if (e->lse_start > txg)
536 spa_log_sm_decrement_mscount(spa_t *spa, uint64_t txg) argument
538 spa_log_sm_t target = { .sls_txg = txg };
565 summary_add_data(spa_t *spa, uint64_t txg, uint64_t metaslabs_flushed, uint64_t nblocks) argument
721 uint64_t txg = dmu_tx_get_txg(tx); local
916 spa_log_sm_alloc(uint64_t sm_obj, uint64_t txg) argument
928 uint64_t txg = dmu_tx_get_txg(tx); local
[all...]
vdev_disk.c 1104 uint64_t offset, state, txg = 0; local
1126 &txg) != 0 || txg == 0) {
vdev_label.c 125 * txg Transaction group in which this label was written
569 * which don't have a txg value stored on their label (i.e. spares/cache)
570 * or have not been completely initialized (txg = 0) just return
573 * 'txg' value.
576 vdev_label_read_config(vdev_t *vd, uint64_t txg) argument
611 * Auxiliary vdevs won't have txg values in their
622 } else if (label_txg <= txg && label_txg > best_txg) {
641 * We found a valid label but it didn't pass txg restrictions.
644 vdev_dbgmsg(vd, "label discarded as txg is too large "
646 (u_longlong_t)txg);
663 uint64_t state, pool_guid, device_guid, txg, spare_pool; local
909 uint64_t txg = 0ULL; local
1283 vdev_label_sync(zio_t *zio, uint64_t *good_writes, vdev_t *vd, int l, uint64_t txg, int flags) argument
1330 vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags) argument
1383 vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg) argument
[all...]
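
vdev_label_read_config() above chooses among a device's redundant labels by txg. A sketch of the selection rule quoted at line 622, with label holding the candidate nvlist (variable names assumed for illustration):

    /*
     * Keep the newest label that does not postdate the requested txg.
     * Labels with label_txg == 0 (spares, caches, or half-initialized
     * devices) never win here.
     */
    if (label_txg <= txg && label_txg > best_txg) {
            best_txg = label_txg;
            nvlist_free(config);
            config = fnvlist_dup(label);
    }
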
ddt.c 247 ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg) argument
249 ASSERT(txg != 0);
253 BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth);
327 ddt_phys_free(ddt_t *ddt, ddt_key_t *ddk, ddt_phys_t *ddp, uint64_t txg) argument
333 zio_free(ddt->ddt_spa, txg, &blk);
1014 ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg) argument
1037 ddt_phys_free(ddt, ddk, ddp, txg);
1041 ddt_phys_free(ddt, ddk, ddp, txg);
1081 ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg) argument
1099 ddt_sync_entry(ddt, dde, tx, txg);
1122 ddt_sync(spa_t *spa, uint64_t txg) argument
[all...]
vdev_indirect.c 527 zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
543 uint64_t txg = dmu_tx_get_txg(tx); local
550 &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
551 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
572 * If we are the first entry committed this txg, kick off the sync
792 zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
803 * referenced as of the given txg.
vdev_removal.c 36 #include <sys/txg.h>
222 * This is called as a synctask in the txg in which we will mark this vdev
241 uint64_t txg = dmu_tx_get_txg(tx); local
291 * Space which we are freeing this txg does not need to
306 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
335 zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
481 uint64_t txg = spa_syncing_txg(spa); local
544 * this txg and iterating forward, we might find that this region
548 int txgoff = (txg + i) & TXG_MASK;
552 * will be synced in txg
715 uint64_t txg = dmu_tx_get_txg(tx); local
903 spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs, uint64_t maxalloc, uint64_t txg, vdev_copy_arg_t *vca, zio_alloc_list_t *zal) argument
1090 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg) argument
1137 uint64_t txg; local
1213 uint64_t txg = dmu_tx_get_txg(tx); local
1466 uint64_t txg = dmu_tx_get_txg(tx); local
1751 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg) argument
1973 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg) argument
2066 uint64_t txg = 0; local
[all...]
dsl_dir.c 769 dsl_dir_update_last_remap_txg(dsl_dir_t *dd, uint64_t txg) argument
773 arg.ddlrta_txg = txg;
1163 dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
1265 uint64_t txg = tx->tx_txg; local
1271 ASSERT3U(txg, !=, 0);
1352 dd->dd_tempreserved[txg & TXG_MASK] += asize;
1375 * Reserve space in this dsl_dir, to be used in this tx's txg.
zil.c 52 * (txg), at which point they can be discarded; or
322 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
363 error = parse_blk_func(zilog, &blk, arg, txg);
385 error = parse_lr_func(zilog, lr, arg, txg);
418 * checkpoint, each ZIL block whose txg is later than the txg
517 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg) argument
527 lwb->lwb_max_txg = txg;
579 zilog_dirty(zilog_t *zilog, uint64_t txg) argument
589 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
321 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg, boolean_t decrypt) argument
605 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg) argument
638 uint64_t txg = 0; local
713 uint64_t txg; local
1061 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) argument
1387 uint64_t txg; local
1517 uint64_t dlen, dnow, lwb_sp, reclen, txg; local
1755 uint64_t otxg, txg; local
1800 uint64_t txg; local
1931 uint64_t otxg, txg; local
1977 uint64_t otxg, txg; local
2139 uint64_t txg = lrc->lrc_txg; local
2862 uint64_t txg = dmu_tx_get_txg(tx); local
3074 uint64_t txg; local
[all...]
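
The zil.c header (line 52 above) notes that log records can be discarded once their txg reaches stable storage in the main pool. A sketch of that retirement rule in the spirit of zil_sync(), assuming txg is the currently syncing txg:

    /*
     * An lwb (log write block) covers intent-log records up to
     * lwb_max_txg; once that txg has synced, the records are durable
     * through the pool itself and the log block can be reclaimed.
     */
    if (lwb->lwb_max_txg <= txg)
            zio_free(spa, txg, &lwb->lwb_blk);
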
/illumos-gate/usr/src/grub/grub-0.97/stage2/
fsys_zfs.c 1532 uint64_t pool_state, txg = 0; local
1559 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
1564 if (txg == 0)
/illumos-gate/usr/src/lib/libzfs/common/
libzfs_import.c 36 * pool guid -> toplevel vdev guid -> label txg
39 * examined every device, we pick the best label txg config for each toplevel
193 uint64_t pool_guid, vdev_guid, top_guid, txg, state; local
226 * we write a label with txg == 0 so that we can identify the device
238 &txg) != 0 || txg == 0) {
284 if (ce->ce_txg == txg)
292 ce->ce_txg = txg;
465 * We rely on the fact that the max txg for the
852 uint64_t state, txg, siz local
[all...]
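
The import code above builds its pool guid -> toplevel vdev guid -> label txg hierarchy only from labels carrying a nonzero txg (lines 226-238). A sketch of that filter, with config the label nvlist just read from the device:

    /*
     * Spares, cache devices, and devices whose labels were never
     * completely initialized are written with txg == 0; they carry
     * no usable pool config, so skip them during discovery.
     */
    uint64_t txg;
    if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) != 0 ||
        txg == 0) {
            nvlist_free(config);
            return (0);     /* not an error, just not a usable pool label */
    }
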

Completed in 222 milliseconds
