Searched refs:txg (Results 1 - 25 of 75) sorted by relevance


/illumos-gate/usr/src/uts/common/fs/zfs/sys/
txg.h
42 #define TXG_INITIAL TXG_SIZE /* initial txg */
43 #define TXG_IDX (txg & TXG_MASK)
69 extern void txg_init(struct dsl_pool *dp, uint64_t txg);
78 extern void txg_delay(struct dsl_pool *dp, uint64_t txg, hrtime_t delta,
85 * necessary syncs immediately). If txg==0, wait for the currently open
86 * txg to finish syncing.
88 extern void txg_wait_synced(struct dsl_pool *dp, uint64_t txg);
93 extern boolean_t txg_wait_synced_sig(struct dsl_pool *dp, uint64_t txg);
99 * should_quiesce is set. If txg == 0, wait for the next open txg
[all...]
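
The TXG_IDX definition above is the key to how per-txg state is stored throughout these files: ZFS keeps a fixed ring of TXG_SIZE slots and maps any transaction group number onto a slot with txg & TXG_MASK, so the handful of in-flight txgs never collide. A minimal standalone sketch of that indexing, assuming only the TXG_SIZE/TXG_MASK/TXG_INITIAL values from txg.h (the txg_hold_count array and the loop are illustrative, not from the source):

#include <stdint.h>
#include <stdio.h>

/* Constants mirroring sys/txg.h: a small power-of-two ring of per-txg slots. */
#define TXG_SIZE    4
#define TXG_MASK    (TXG_SIZE - 1)
#define TXG_INITIAL TXG_SIZE            /* initial txg */

/* Hypothetical per-txg counter, indexed the same way tc_count[] is. */
static uint64_t txg_hold_count[TXG_SIZE];

int
main(void)
{
    for (uint64_t txg = TXG_INITIAL; txg < TXG_INITIAL + 6; txg++) {
        /* Same indexing as the TXG_IDX macro: txg & TXG_MASK. */
        txg_hold_count[txg & TXG_MASK]++;
        printf("txg %llu -> slot %llu\n",
            (unsigned long long)txg,
            (unsigned long long)(txg & TXG_MASK));
    }
    return (0);
}
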
uberblock.h
43 extern boolean_t uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg,
vdev.h
61 extern int vdev_create(vdev_t *, uint64_t txg, boolean_t isreplace);
71 uint64_t txg, uint64_t size);
73 uint64_t txg, uint64_t size);
76 extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
94 extern int vdev_metaslab_init(vdev_t *vd, uint64_t txg);
97 extern void vdev_expand(vdev_t *vd, uint64_t txg);
151 extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
178 extern nvlist_t *vdev_label_read_config(vdev_t *vd, uint64_t txg);
193 extern int vdev_label_init(vdev_t *vd, uint64_t txg, vdev_labeltype_t reason);
zfeature.h
30 #include <sys/txg.h>
53 uint64_t *txg);
dsl_pool.h
31 #include <sys/txg.h>
146 int dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp);
150 struct dsl_crypto_params *dcp, uint64_t txg);
151 void dsl_pool_sync(dsl_pool_t *dp, uint64_t txg);
152 void dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg);
158 void dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg);
159 void dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp);
160 void dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg,
txg_impl.h
35 #include <sys/txg.h>
74 uint64_t tc_count[TXG_SIZE]; /* tx hold count on each txg */
94 uint64_t tx_open_txg; /* currently open txg id */
95 uint64_t tx_quiescing_txg; /* currently quiescing txg id */
96 uint64_t tx_quiesced_txg; /* quiesced txg waiting for sync */
97 uint64_t tx_syncing_txg; /* currently syncing txg id */
98 uint64_t tx_synced_txg; /* last synced txg id */
102 uint64_t tx_sync_txg_waiting; /* txg we're waiting to sync */
103 uint64_t tx_quiesce_txg_waiting; /* txg we're waiting to open */
dsl_synctask.h
29 #include <sys/txg.h>
dmu_tx.h
34 #include <sys/txg.h>
131 extern dmu_tx_t *dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg);
vdev_indirect_births.h
68 uint64_t offset, uint64_t txg, dmu_tx_t *tx);
zil.h
55 * with a common structure that defines the type, length, and txg.
63 uint64_t zh_claim_txg; /* txg in which log blocks were claimed */
104 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
249 uint64_t lr_gen; /* generation (txg of creation) */
368 * When the txg commits the block is linked in.
403 uint64_t txg);
405 uint64_t txg);
411 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
449 extern void zil_lwb_add_txg(struct lwb *lwb, uint64_t txg);
/illumos-gate/usr/src/uts/common/fs/zfs/
txg.c
42 * these transaction groups. Each successive transaction group (txg) is
45 * there may be an active txg associated with each state; each active txg may
47 * be up to three active txgs, and there is always a txg in the open state
50 * accepted into the txg in the open state, and are completed while the txg is
56 * When a new txg becomes active, it first enters the open state. New
58 * currently open txg. There is always a txg in the open state so that ZFS can
59 * accept new changes (though the txg ma
118 txg_init(dsl_pool_t *dp, uint64_t txg) argument
297 uint64_t txg; local
354 txg_quiesce(dsl_pool_t *dp, uint64_t txg) argument
409 txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) argument
486 uint64_t txg; local
565 uint64_t txg; local
610 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution) argument
636 txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig) argument
675 txg_wait_synced(dsl_pool_t *dp, uint64_t txg) argument
685 txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg) argument
695 txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce) argument
760 txg_verify(spa_t *spa, uint64_t txg) argument
799 txg_list_empty(txg_list_t *tl, uint64_t txg) argument
828 txg_list_add(txg_list_t *tl, void *p, uint64_t txg) argument
853 txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg) argument
881 txg_list_remove(txg_list_t *tl, uint64_t txg) argument
906 txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg) argument
930 txg_list_member(txg_list_t *tl, void *p, uint64_t txg) argument
943 txg_list_head(txg_list_t *tl, uint64_t txg) argument
953 txg_list_next(txg_list_t *tl, void *p, uint64_t txg) argument
[all...]
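
The txg.c comment excerpted above describes the pipeline these functions implement: there is always a txg in the open state accepting new transactions, at most one txg quiescing (draining its holds), and at most one txg syncing to disk, so up to three txgs can be active at once. The toy model below makes that ordering concrete; it is not the illumos code (which uses separate quiesce and sync threads, locks, and condition variables), just a single-threaded sketch whose field names echo tx_state in txg_impl.h and which assumes each sync finishes before the next tick:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the txg pipeline; field names echo tx_state in txg_impl.h. */
typedef struct toy_tx_state {
    uint64_t tx_open_txg;       /* currently open txg id */
    uint64_t tx_quiesced_txg;   /* quiesced txg waiting for sync (0 = none) */
    uint64_t tx_syncing_txg;    /* currently syncing txg id (0 = none) */
    uint64_t tx_synced_txg;     /* last synced txg id */
} toy_tx_state_t;

/*
 * One "tick": the previous sync is assumed finished, the quiesced txg starts
 * syncing, the open txg quiesces, and a new txg opens.  Up to three txgs are
 * in flight at once.
 */
static void
toy_txg_tick(toy_tx_state_t *tx)
{
    if (tx->tx_syncing_txg != 0)
        tx->tx_synced_txg = tx->tx_syncing_txg;
    tx->tx_syncing_txg = tx->tx_quiesced_txg;
    tx->tx_quiesced_txg = tx->tx_open_txg;
    tx->tx_open_txg++;
}

int
main(void)
{
    toy_tx_state_t tx = { .tx_open_txg = 4 /* TXG_INITIAL */ };

    for (int i = 0; i < 4; i++) {
        toy_txg_tick(&tx);
        printf("open %llu  quiesced %llu  syncing %llu  synced %llu\n",
            (unsigned long long)tx.tx_open_txg,
            (unsigned long long)tx.tx_quiesced_txg,
            (unsigned long long)tx.tx_syncing_txg,
            (unsigned long long)tx.tx_synced_txg);
    }
    return (0);
}
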
uberblock.c
48 uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg, uint64_t mmp_delay) argument
50 ASSERT(ub->ub_txg < txg);
57 ub->ub_txg = txg;
73 return (ub->ub_rootbp.blk_birth == txg);
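
Those three lines are essentially the whole contract of uberblock_update(): the txg recorded in the uberblock only moves forward, and the boolean result reports whether the root block pointer was actually born in the txg being synced. A stripped-down restatement under that reading (the toy_* types are stand-ins; only ub_txg, ub_rootbp and blk_birth mirror real field names):

#include <assert.h>
#include <stdint.h>

/* Minimal stand-ins for the uberblock_t and blkptr_t fields used here. */
typedef struct toy_blkptr { uint64_t blk_birth; } toy_blkptr_t;
typedef struct toy_uberblock {
    uint64_t     ub_txg;      /* txg of last sync */
    toy_blkptr_t ub_rootbp;   /* root block pointer */
} toy_uberblock_t;

/* Same shape as the excerpt: txg is monotonic, result says whether the
 * root bp was (re)written in this txg. */
static int
toy_uberblock_update(toy_uberblock_t *ub, uint64_t txg)
{
    assert(ub->ub_txg < txg);
    ub->ub_txg = txg;
    return (ub->ub_rootbp.blk_birth == txg);
}

int
main(void)
{
    toy_uberblock_t ub = { .ub_txg = 10, .ub_rootbp = { .blk_birth = 11 } };

    /* Root bp born in txg 11, so the update reports "true". */
    return (toy_uberblock_update(&ub, 11) ? 0 : 1);
}
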
vdev_removal.c
36 #include <sys/txg.h>
223 * This is called as a synctask in the txg in which we will mark this vdev
242 uint64_t txg = dmu_tx_get_txg(tx); local
292 * Space which we are freeing this txg does not need to
307 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
336 zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
482 uint64_t txg = spa_syncing_txg(spa); local
545 * this txg and iterating forward, we might find that this region
549 int txgoff = (txg + i) & TXG_MASK;
553 * will be synced in txg
716 uint64_t txg = dmu_tx_get_txg(tx); local
904 spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs, uint64_t maxalloc, uint64_t txg, vdev_copy_arg_t *vca, zio_alloc_list_t *zal) argument
1090 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg) argument
1137 uint64_t txg; local
1213 uint64_t txg = dmu_tx_get_txg(tx); local
1469 uint64_t txg = dmu_tx_get_txg(tx); local
1754 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg) argument
1976 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg) argument
2069 uint64_t txg = 0; local
[all...]
dsl_pool.c
79 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
82 * relevant, the per-txg value is useful for debugging. The tunable
87 * ensure that there is a txg syncing (see the comment in txg.c for a full
108 * zfs_dirty_data_max), push out a txg. This should be less than
186 dsl_pool_open_impl(spa_t *spa, uint64_t txg) argument
195 txg_init(dp, txg);
232 dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) argument
235 dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
446 uint64_t txg)
445 dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp, uint64_t txg) argument
577 dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg) argument
598 dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) argument
773 dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg) argument
896 dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg) argument
[all...]
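
The dsl_pool.c comment above summarizes the write throttle's bookkeeping: dirty space is charged both to the txg it belongs to (dp_dirty_pertxg[], mainly a debugging aid) and to a pool-wide total, and once the total crosses a threshold derived from zfs_dirty_data_max a txg is pushed out. A hedged sketch of that accounting and decision, with invented toy_* fields standing in for the real dsl_pool state and tunables:

#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

/* Invented stand-in for the dirty-space accounting described in dsl_pool.c. */
typedef struct toy_pool {
    uint64_t dirty_pertxg[TXG_SIZE];  /* per-txg dirty bytes */
    uint64_t dirty_total;             /* pool-wide dirty bytes */
    uint64_t dirty_data_max;          /* cf. zfs_dirty_data_max */
    uint64_t dirty_data_sync_pct;     /* percent of max that forces a sync */
} toy_pool_t;

/* Charge newly dirtied space to a txg and decide whether to push out a sync. */
static int
toy_pool_dirty_space(toy_pool_t *dp, uint64_t txg, uint64_t space)
{
    dp->dirty_pertxg[txg & TXG_MASK] += space;
    dp->dirty_total += space;
    return (dp->dirty_total >=
        dp->dirty_data_max * dp->dirty_data_sync_pct / 100);
}

int
main(void)
{
    toy_pool_t dp = { .dirty_data_max = 4ULL << 30, .dirty_data_sync_pct = 20 };
    uint64_t txg = 100;

    for (int i = 0; i < 10; i++) {
        if (toy_pool_dirty_space(&dp, txg, 128ULL << 20)) {
            printf("dirty total %llu: push out txg %llu\n",
                (unsigned long long)dp.dirty_total,
                (unsigned long long)txg);
            break;
        }
    }
    return (0);
}
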
zil.c
52 * (txg), at which point they can be discarded; or
322 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
363 error = parse_blk_func(zilog, &blk, arg, txg);
385 error = parse_lr_func(zilog, lr, arg, txg);
418 * checkpoint, each ZIL block whose txg is later than the txg
517 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg) argument
527 lwb->lwb_max_txg = txg;
579 zilog_dirty(zilog_t *zilog, uint64_t txg) argument
589 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
321 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg, boolean_t decrypt) argument
605 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg) argument
638 uint64_t txg = 0; local
713 uint64_t txg; local
1061 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) argument
1387 uint64_t txg; local
1517 uint64_t dlen, dnow, lwb_sp, reclen, txg; local
1755 uint64_t otxg, txg; local
1800 uint64_t txg; local
1921 uint64_t otxg, txg; local
1967 uint64_t otxg, txg; local
2129 uint64_t txg = lrc->lrc_txg; local
2852 uint64_t txg = dmu_tx_get_txg(tx); local
3064 uint64_t txg; local
[all...]
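
zil_parse() (full signature at the line-321 entry above) walks the on-disk log and invokes parse_blk_func once per log block and parse_lr_func once per log record, handing each the caller's arg and a txg cutoff; the calls at lines 363 and 385 show the callback shape as (zilog, block-or-record, arg, txg). Below is a hedged sketch of a pair of counting callbacks in that shape, driven directly for illustration; the toy_* types are stand-ins (only lrc_txg, seen at the line-2129 entry, mirrors a real field), and the exact callback typedefs in zil.h may differ:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the real zilog_t, blkptr_t and lr_t; only lrc_txg is used. */
typedef struct toy_zilog toy_zilog_t;
typedef struct toy_blkptr toy_blkptr_t;
typedef struct toy_lr { uint64_t lrc_txg; } toy_lr_t;

typedef struct zil_counts {
    uint64_t zc_blocks;
    uint64_t zc_records;
} zil_counts_t;

/* Per-block callback: just count the block. */
static int
count_blk_cb(toy_zilog_t *zilog, const toy_blkptr_t *bp, void *arg,
    uint64_t first_txg)
{
    (void) zilog; (void) bp; (void) first_txg;
    ((zil_counts_t *)arg)->zc_blocks++;
    return (0);
}

/* Per-record callback: count records at or after the txg cutoff. */
static int
count_lr_cb(toy_zilog_t *zilog, const toy_lr_t *lrc, void *arg,
    uint64_t first_txg)
{
    (void) zilog;
    if (lrc->lrc_txg >= first_txg)
        ((zil_counts_t *)arg)->zc_records++;
    return (0);
}

int
main(void)
{
    zil_counts_t zc = { 0 };
    toy_lr_t lrs[] = { { .lrc_txg = 8 }, { .lrc_txg = 12 }, { .lrc_txg = 15 } };

    count_blk_cb(NULL, NULL, &zc, 10);
    for (unsigned i = 0; i < sizeof (lrs) / sizeof (lrs[0]); i++)
        count_lr_cb(NULL, &lrs[i], &zc, 10);
    printf("%llu blocks, %llu records at or after txg 10\n",
        (unsigned long long)zc.zc_blocks,
        (unsigned long long)zc.zc_records);
    return (0);
}
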
vdev_label.c
125 * txg Transaction group in which this label was written
728 * which don't have a txg value stored on their label (i.e. spares/cache)
729 * or have not been completely initialized (txg = 0) just return
732 * 'txg' value.
735 vdev_label_read_config(vdev_t *vd, uint64_t txg) argument
770 * Auxiliary vdevs won't have txg values in their
781 } else if (label_txg <= txg && label_txg > best_txg) {
800 * We found a valid label but it didn't pass txg restrictions.
803 vdev_dbgmsg(vd, "label discarded as txg is too large "
805 (u_longlong_t)txg);
822 uint64_t state, pool_guid, device_guid, txg, spare_pool; local
1068 uint64_t txg = 0ULL; local
1442 vdev_label_sync(zio_t *zio, uint64_t *good_writes, vdev_t *vd, int l, uint64_t txg, int flags) argument
1489 vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags) argument
1542 vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg) argument
[all...]
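
The vdev_label.c lines above show the rule vdev_label_read_config() uses when a device's labels disagree. For vdevs that carry a txg (the excerpt notes spares/cache and uninitialized vdevs are returned without this check), a label is only eligible if its txg does not exceed the requested txg, labels that are newer are discarded with the 'txg is too large' message, and among eligible labels the highest txg wins (line 781). A standalone sketch of just that selection, simplified to operate on an array of already-extracted label txgs rather than on the per-label nvlists the real code reads:

#include <stdint.h>

#define VDEV_LABELS 4   /* labels per leaf vdev */

/*
 * Pick the best label by txg: skip labels with no txg recorded (0 here, a
 * simplification), ignore labels newer than 'txg', prefer the highest
 * remaining txg.  Returns the winning label index, or -1 if none qualifies.
 */
static int
toy_best_label(const uint64_t label_txg[VDEV_LABELS], uint64_t txg)
{
    uint64_t best_txg = 0;
    int best = -1;

    for (int l = 0; l < VDEV_LABELS; l++) {
        if (label_txg[l] == 0)
            continue;
        if (label_txg[l] <= txg && label_txg[l] > best_txg) {
            best_txg = label_txg[l];
            best = l;
        }
    }
    return (best);
}

int
main(void)
{
    uint64_t labels[VDEV_LABELS] = { 90, 100, 105, 0 };

    /* Label 2 (txg 105) is too new for txg 100; label 1 wins. */
    return (toy_best_label(labels, 100) == 1 ? 0 : 1);
}
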
spa_config.c
363 spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats) argument
382 * If txg is -1, report the current value of spa->spa_config_txg.
384 if (txg == -1ULL)
385 txg = spa->spa_config_txg;
409 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
520 uint64_t txg; local
526 txg = spa_last_synced_txg(spa) + 1;
553 vdev_expand(tvd, txg);
561 txg_wait_synced(spa->spa_dsl_pool, txg);
spa_log_spacemap.c
59 * pairs are of the form <key: txg, value: log space map object for that txg>.
213 * flushed per txg) and that's why making it a percentage in terms of the
220 * linearly from txg to txg (e.g. the oldest log should have the most
402 * for a txg as an argument so we can locate the appropriate summary entry for
406 spa_log_summary_decrement_mscount(spa_t *spa, uint64_t txg) argument
418 if (e->lse_start > txg)
536 spa_log_sm_decrement_mscount(spa_t *spa, uint64_t txg) argument
538 spa_log_sm_t target = { .sls_txg = txg };
565 summary_add_data(spa_t *spa, uint64_t txg, uint64_t metaslabs_flushed, uint64_t nblocks) argument
721 uint64_t txg = dmu_tx_get_txg(tx); local
916 spa_log_sm_alloc(uint64_t sm_obj, uint64_t txg) argument
928 uint64_t txg = dmu_tx_get_txg(tx); local
[all...]
spa.c
65 #include <sys/txg.h>
1383 * spa_sync to attempt to flush all the metaslabs for that txg.
2049 * current txg so that the "stubby" block can be removed
2581 uint64_t txg = ub->ub_txg; local
2599 * during the earlier tryimport. If the txg recorded there is 0 then
2630 if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
2633 "txg %llu ub_txg %llu "
2636 txg, ub->ub_txg, timestamp, ub->ub_timestamp,
2998 spa_load_note(spa, "using uberblock with txg=%llu",
3812 * This must all happen in a single txg
5071 uint64_t txg = TXG_INITIAL; local
5421 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) argument
5467 uint64_t guid, txg; local
6064 uint64_t txg; local
6199 uint64_t txg, dtl_max_txg; local
6418 uint64_t txg; local
6908 uint64_t txg, *glist; local
8381 uint64_t txg = tx->tx_txg; local
8467 uint64_t txg = tx->tx_txg; local
8524 spa_sync(spa_t *spa, uint64_t txg) argument
[all...]
vdev.c
1191 vdev_metaslab_init(vdev_t *vd, uint64_t txg) argument
1202 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1231 if (txg == 0 && vd->vdev_ms_array != 0) {
1253 error = metaslab_init(vd->vdev_mg, m, object, txg,
1262 if (txg == 0)
1274 if (txg == 0)
1802 uint64_t txg; local
1821 * was modified at a point after the current txg.
1822 * If config lock is not held do not check for the txg. spa_sync could
1827 txg
2195 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) argument
2298 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) argument
2316 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) argument
2364 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) argument
2379 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) argument
2506 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) argument
2747 vdev_dtl_sync(vdev_t *vd, uint64_t txg) argument
3127 vdev_remove_empty_log(vdev_t *vd, uint64_t txg) argument
3147 vdev_sync_done(vdev_t *vd, uint64_t txg) argument
3163 vdev_sync(vdev_t *vd, uint64_t txg) argument
3869 uint64_t txg = zio->io_txg; local
4495 vdev_expand(vdev_t *vd, uint64_t txg) argument
[all...]
metaslab.c
601 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg) argument
625 if (txg >
629 metaslab_evict(msp, txg);
721 * have any space until we finish syncing out this txg.
1830 metaslab_verify_space(metaslab_t *msp, uint64_t txg) argument
1849 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1876 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
2100 * succeed. Between that and the normal unloading processing during txg sync,
2377 zfs_dbgmsg("loading: txg %llu, spa %s, vdev_id %llu, "
2526 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) argument
2554 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg, metaslab_t **msp) argument
2807 uint64_t txg = spa_syncing_txg(spa); local
3485 uint64_t txg = dmu_tx_get_txg(tx); local
3817 metaslab_sync(metaslab_t *msp, uint64_t txg) argument
4111 metaslab_evict(metaslab_t *msp, uint64_t txg) argument
4132 metaslab_sync_done(metaslab_t *msp, uint64_t txg) argument
4515 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) argument
4659 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, int allocator, boolean_t try_hard) argument
4964 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, int allocator, boolean_t try_hard) argument
5003 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, zio_alloc_list_t *zal, int allocator) argument
5430 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) argument
5555 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) argument
5637 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) argument
5671 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) argument
5691 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, zio_alloc_list_t *zal, zio_t *zio, int allocator) argument
5748 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) argument
5779 ASSERT3U(spa_syncing_txg(spa), ==, txg); local
5798 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) argument
6030 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx) argument
[all...]
/illumos-gate/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/
import_rewind_device_replaced.ksh
27 # 3. Sync a few times and note last synced txg.
31 # 6. Test 1: Rewind pool to noted txg and then verify data checksums.
33 # 7. Re-import pool at latest txg and let the replacement finish.
35 # 9. Test 2: Rewind pool to noted txg and then verify data checksums.
93 typeset txg
94 txg=$(get_last_txg_synced $TESTPOOL1)
116 log_must zpool import -d $DEVICE_DIR -o readonly=on -T $txg $TESTPOOL1
123 # Import pool at latest txg to finish the resilvering
136 log_must zpool import -d $DEVICE_DIR -T $txg $TESTPOOL1
zpool_import.kshlib
340 # Use mdb to find the last txg that was synced in an active pool.
347 txg=$(tail "/proc/spl/kstat/zfs/$pool/txgs" |
349 [[ "$txg" ]] || txg=0
350 echo $txg
372 typeset -i txg
373 txg=$(mdb -k -e "$mdbcmd")
376 echo $txg
import_rewind_config_changed.ksh
27 # 3. Note last synced txg.
34 # 10. Verify that we can rewind the pool to the noted txg.
75 typeset txg
76 txg=$(get_last_txg_synced $TESTPOOL1)
119 # right after we record the txg we plan to rewind to.
130 log_must zpool import -d $DEVICE_DIR -T $txg $TESTPOOL1
/illumos-gate/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_sync/
zpool_sync_001_pos.ksh
27 # 2. Use zdb to obtain current txg
30 # 5. Verify the new txg is now bigger than the saved one
36 typeset -i txg=$(zdb -u $1 | sed -n 's/^[ ][ ]*txg = \(.*\)$/\1/p')
37 echo $txg
55 log_fail "'zpool ${args[i]}' failed: txg $orig_txg >= $new_txg"
68 log_fail "'sync_pool $TESTPOOL false' failed: txg $orig_txg >= $new_txg"
76 log_fail "'sync_pool $TESTPOOL true' failed: txg $orig_txg >= $new_txg"
85 log_fail "'sync_pool $TESTPOOL' failed: txg $orig_txg >= $new_txg"

Completed in 81 milliseconds
