Lines matching refs: tx

43 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
50 dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP); in dmu_tx_create_dd() local
51 tx->tx_dir = dd; in dmu_tx_create_dd()
53 tx->tx_pool = dd->dd_pool; in dmu_tx_create_dd()
54 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t), in dmu_tx_create_dd()
56 list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t), in dmu_tx_create_dd()
58 tx->tx_start = gethrtime(); in dmu_tx_create_dd()
59 return (tx); in dmu_tx_create_dd()
65 dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir); in dmu_tx_create() local
66 tx->tx_objset = os; in dmu_tx_create()
67 return (tx); in dmu_tx_create()
73 dmu_tx_t *tx = dmu_tx_create_dd(NULL); in dmu_tx_create_assigned() local
76 tx->tx_pool = dp; in dmu_tx_create_assigned()
77 tx->tx_txg = txg; in dmu_tx_create_assigned()
78 tx->tx_anyobj = TRUE; in dmu_tx_create_assigned()
80 return (tx); in dmu_tx_create_assigned()
84 dmu_tx_is_syncing(dmu_tx_t *tx) in dmu_tx_is_syncing() argument
86 return (tx->tx_anyobj); in dmu_tx_is_syncing()
90 dmu_tx_private_ok(dmu_tx_t *tx) in dmu_tx_private_ok() argument
92 return (tx->tx_anyobj); in dmu_tx_private_ok()
96 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type, in dmu_tx_hold_dnode_impl() argument
102 (void) zfs_refcount_add(&dn->dn_holds, tx); in dmu_tx_hold_dnode_impl()
103 if (tx->tx_txg != 0) { in dmu_tx_hold_dnode_impl()
111 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_hold_dnode_impl()
112 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_hold_dnode_impl()
118 txh->txh_tx = tx; in dmu_tx_hold_dnode_impl()
125 list_insert_tail(&tx->tx_holds, txh); in dmu_tx_hold_dnode_impl()
131 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object, in dmu_tx_hold_object_impl() argument
141 tx->tx_err = err; in dmu_tx_hold_object_impl()
145 txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2); in dmu_tx_hold_object_impl()
152 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_add_new_object() argument
158 if (!dmu_tx_is_syncing(tx)) in dmu_tx_add_new_object()
159 (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0); in dmu_tx_add_new_object()
288 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len) in dmu_tx_hold_write() argument
292 ASSERT0(tx->tx_txg); in dmu_tx_hold_write()
296 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_write()
305 dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object) in dmu_tx_hold_remap_l1indirect() argument
309 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_remap_l1indirect()
310 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_remap_l1indirect()
322 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len) in dmu_tx_hold_write_by_dnode() argument
326 ASSERT0(tx->tx_txg); in dmu_tx_hold_write_by_dnode()
330 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len); in dmu_tx_hold_write_by_dnode()
346 dmu_tx_mark_netfree(dmu_tx_t *tx) in dmu_tx_mark_netfree() argument
348 tx->tx_netfree = B_TRUE; in dmu_tx_mark_netfree()
354 dmu_tx_t *tx; in dmu_tx_hold_free_impl() local
358 tx = txh->txh_tx; in dmu_tx_hold_free_impl()
359 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_free_impl()
409 zio_t *zio = zio_root(tx->tx_pool->dp_spa, in dmu_tx_hold_free_impl()
418 tx->tx_err = err; in dmu_tx_hold_free_impl()
428 tx->tx_err = err; in dmu_tx_hold_free_impl()
435 tx->tx_err = err; in dmu_tx_hold_free_impl()
442 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len) in dmu_tx_hold_free() argument
446 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_free()
453 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len) in dmu_tx_hold_free_by_dnode() argument
457 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len); in dmu_tx_hold_free_by_dnode()
465 dmu_tx_t *tx = txh->txh_tx; in dmu_tx_hold_zap_impl() local
469 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_zap_impl()
499 tx->tx_err = err; in dmu_tx_hold_zap_impl()
509 tx->tx_err = err; in dmu_tx_hold_zap_impl()
515 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name) in dmu_tx_hold_zap() argument
519 ASSERT0(tx->tx_txg); in dmu_tx_hold_zap()
521 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_zap()
528 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name) in dmu_tx_hold_zap_by_dnode() argument
532 ASSERT0(tx->tx_txg); in dmu_tx_hold_zap_by_dnode()
535 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name); in dmu_tx_hold_zap_by_dnode()
541 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object) in dmu_tx_hold_bonus() argument
545 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_bonus()
547 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_bonus()
554 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_hold_bonus_by_dnode() argument
558 ASSERT0(tx->tx_txg); in dmu_tx_hold_bonus_by_dnode()
560 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0); in dmu_tx_hold_bonus_by_dnode()
566 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space) in dmu_tx_hold_space() argument
569 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_space()
571 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_space()
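
The dmu_tx_hold_*() calls listed above all serve the same purpose: before a
transaction is assigned to an open txg, the caller declares every object,
byte range, ZAP entry, bonus buffer, or raw amount of space it intends to
dirty. A minimal sketch of that hold phase follows; the objset, object
numbers, range, and entry name are illustrative parameters, not values taken
from this listing.

static dmu_tx_t *
example_declare_holds(objset_t *os, uint64_t obj, uint64_t zap_obj,
    uint64_t off, int len, const char *name)
{
        dmu_tx_t *tx = dmu_tx_create(os);

        /* Intend to dirty the byte range [off, off + len) of obj. */
        dmu_tx_hold_write(tx, obj, off, len);

        /* Intend to add an entry called 'name' to the ZAP object. */
        dmu_tx_hold_zap(tx, zap_obj, B_TRUE, name);

        /* Intend to modify obj's bonus buffer. */
        dmu_tx_hold_bonus(tx, obj);

        /* All holds must be declared before dmu_tx_assign(). */
        return (tx);
}
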
579 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db) in dmu_tx_dirty_buf() argument
586 ASSERT(tx->tx_txg != 0); in dmu_tx_dirty_buf()
587 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset); in dmu_tx_dirty_buf()
590 if (tx->tx_anyobj) { in dmu_tx_dirty_buf()
601 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_dirty_buf()
602 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_dirty_buf()
603 ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg); in dmu_tx_dirty_buf()
782 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty) in dmu_tx_delay() argument
784 dsl_pool_t *dp = tx->tx_pool; in dmu_tx_delay()
803 if (now > tx->tx_start + min_tx_time) in dmu_tx_delay()
808 DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty, in dmu_tx_delay()
812 wakeup = MAX(tx->tx_start + min_tx_time, in dmu_tx_delay()
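
dmu_tx_delay() implements the write throttle: once the pool's dirty data
exceeds the delay threshold, an assigning transaction is put to sleep so
that tx_start plus a computed minimum transaction time lies in the future.
For reference, the upstream comment block describes that time roughly as

        min_tx_time = MIN(zfs_delay_max_ns,
            zfs_delay_scale * (dirty - delay_min_bytes) /
            (zfs_dirty_data_max - dirty));

so min_tx_time grows hyperbolically as dirty data approaches
zfs_dirty_data_max, and the thread sleeps until tx->tx_start + min_tx_time
(or the pool's last wakeup plus min_tx_time, whichever is later). Tunable
names are taken from current OpenZFS and may differ between releases.
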
873 dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how) in dmu_tx_try_assign() argument
875 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_try_assign()
877 ASSERT0(tx->tx_txg); in dmu_tx_try_assign()
879 if (tx->tx_err) in dmu_tx_try_assign()
880 return (tx->tx_err); in dmu_tx_try_assign()
899 if (!tx->tx_dirty_delayed && in dmu_tx_try_assign()
900 dsl_pool_need_dirty_delay(tx->tx_pool)) { in dmu_tx_try_assign()
901 tx->tx_wait_dirty = B_TRUE; in dmu_tx_try_assign()
905 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh); in dmu_tx_try_assign()
906 tx->tx_needassign_txh = NULL; in dmu_tx_try_assign()
916 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_try_assign()
917 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_try_assign()
921 if (dn->dn_assigned_txg == tx->tx_txg - 1) { in dmu_tx_try_assign()
923 tx->tx_needassign_txh = txh; in dmu_tx_try_assign()
927 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_try_assign()
928 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_try_assign()
929 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_try_assign()
937 uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite); in dmu_tx_try_assign()
941 if (tx->tx_dir != NULL && asize != 0) { in dmu_tx_try_assign()
942 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory, in dmu_tx_try_assign()
943 asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx); in dmu_tx_try_assign()
952 dmu_tx_unassign(dmu_tx_t *tx) in dmu_tx_unassign() argument
954 if (tx->tx_txg == 0) in dmu_tx_unassign()
957 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_unassign()
963 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); in dmu_tx_unassign()
964 txh != tx->tx_needassign_txh; in dmu_tx_unassign()
965 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_unassign()
971 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_unassign()
973 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_unassign()
980 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_unassign()
982 tx->tx_lasttried_txg = tx->tx_txg; in dmu_tx_unassign()
983 tx->tx_txg = 0; in dmu_tx_unassign()
1007 dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how) in dmu_tx_assign() argument
1011 ASSERT(tx->tx_txg == 0); in dmu_tx_assign()
1013 ASSERT(!dsl_pool_sync_context(tx->tx_pool)); in dmu_tx_assign()
1016 IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool)); in dmu_tx_assign()
1019 tx->tx_dirty_delayed = B_TRUE; in dmu_tx_assign()
1021 while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) { in dmu_tx_assign()
1022 dmu_tx_unassign(tx); in dmu_tx_assign()
1027 dmu_tx_wait(tx); in dmu_tx_assign()
1030 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_assign()
1036 dmu_tx_wait(dmu_tx_t *tx) in dmu_tx_wait() argument
1038 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_wait()
1039 dsl_pool_t *dp = tx->tx_pool; in dmu_tx_wait()
1041 ASSERT(tx->tx_txg == 0); in dmu_tx_wait()
1042 ASSERT(!dsl_pool_config_held(tx->tx_pool)); in dmu_tx_wait()
1044 if (tx->tx_wait_dirty) { in dmu_tx_wait()
1056 dmu_tx_delay(tx, dirty); in dmu_tx_wait()
1058 tx->tx_wait_dirty = B_FALSE; in dmu_tx_wait()
1066 tx->tx_dirty_delayed = B_TRUE; in dmu_tx_wait()
1067 } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) { in dmu_tx_wait()
1076 } else if (tx->tx_needassign_txh) { in dmu_tx_wait()
1081 dnode_t *dn = tx->tx_needassign_txh->txh_dnode; in dmu_tx_wait()
1084 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) in dmu_tx_wait()
1087 tx->tx_needassign_txh = NULL; in dmu_tx_wait()
1099 dmu_tx_destroy(dmu_tx_t *tx) in dmu_tx_destroy() argument
1103 while ((txh = list_head(&tx->tx_holds)) != NULL) { in dmu_tx_destroy()
1106 list_remove(&tx->tx_holds, txh); in dmu_tx_destroy()
1113 dnode_rele(dn, tx); in dmu_tx_destroy()
1116 list_destroy(&tx->tx_callbacks); in dmu_tx_destroy()
1117 list_destroy(&tx->tx_holds); in dmu_tx_destroy()
1118 kmem_free(tx, sizeof (dmu_tx_t)); in dmu_tx_destroy()
1122 dmu_tx_commit(dmu_tx_t *tx) in dmu_tx_commit() argument
1124 ASSERT(tx->tx_txg != 0); in dmu_tx_commit()
1130 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_commit()
1131 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_commit()
1138 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_commit()
1140 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_commit()
1147 if (tx->tx_tempreserve_cookie) in dmu_tx_commit()
1148 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); in dmu_tx_commit()
1150 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_commit()
1151 txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks); in dmu_tx_commit()
1153 if (tx->tx_anyobj == FALSE) in dmu_tx_commit()
1154 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_commit()
1156 dmu_tx_destroy(tx); in dmu_tx_commit()
1160 dmu_tx_abort(dmu_tx_t *tx) in dmu_tx_abort() argument
1162 ASSERT(tx->tx_txg == 0); in dmu_tx_abort()
1167 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_abort()
1168 dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED); in dmu_tx_abort()
1170 dmu_tx_destroy(tx); in dmu_tx_abort()
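
dmu_tx_assign(), dmu_tx_wait(), dmu_tx_commit() and dmu_tx_abort() form the
transaction lifecycle. A hedged sketch of the conventional caller pattern
follows (TXG_NOWAIT with an ERESTART retry, as documented above
dmu_tx_assign(); the object/offset parameters and the dirtying step are
placeholders):

static int
example_tx_lifecycle(objset_t *os, uint64_t obj, uint64_t off, int len)
{
        dmu_tx_t *tx;
        int err;

top:
        tx = dmu_tx_create(os);
        dmu_tx_hold_write(tx, obj, off, len);

        err = dmu_tx_assign(tx, TXG_NOWAIT);
        if (err == ERESTART) {
                /* Txg is quiescing or the pool is throttled: wait, retry. */
                dmu_tx_wait(tx);
                dmu_tx_abort(tx);
                goto top;
        } else if (err != 0) {
                dmu_tx_abort(tx);       /* e.g. ENOSPC or a stashed tx_err */
                return (err);
        }

        /*
         * dmu_tx_get_txg(tx) is now valid; the dirtying work declared by
         * the holds (dmu_write(), zap_add(), ...) happens here, against tx.
         */

        dmu_tx_commit(tx);
        return (0);
}

Callers that can block may instead pass TXG_WAIT, in which case
dmu_tx_assign() performs the wait-and-retry internally and the caller never
sees ERESTART.
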
1174 dmu_tx_get_txg(dmu_tx_t *tx) in dmu_tx_get_txg() argument
1176 ASSERT(tx->tx_txg != 0); in dmu_tx_get_txg()
1177 return (tx->tx_txg); in dmu_tx_get_txg()
1181 dmu_tx_pool(dmu_tx_t *tx) in dmu_tx_pool() argument
1183 ASSERT(tx->tx_pool != NULL); in dmu_tx_pool()
1184 return (tx->tx_pool); in dmu_tx_pool()
1188 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data) in dmu_tx_callback_register() argument
1197 list_insert_tail(&tx->tx_callbacks, dcb); in dmu_tx_callback_register()
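
dmu_tx_callback_register() attaches a callback that fires once the
transaction's txg has synced (error == 0) or when the transaction is aborted
(ECANCELED, per the dmu_tx_abort() path above). A minimal sketch, using an
illustrative per-tx state structure:

typedef struct example_state {
        uint64_t es_bytes;
} example_state_t;

static void
example_commit_cb(void *dcb_data, int error)
{
        example_state_t *es = dcb_data;

        /* error == 0: the txg is on stable storage; otherwise aborted. */
        kmem_free(es, sizeof (*es));
}

static void
example_register_cb(dmu_tx_t *tx, uint64_t bytes)
{
        example_state_t *es = kmem_zalloc(sizeof (*es), KM_SLEEP);

        es->es_bytes = bytes;
        /* Legal any time between dmu_tx_create() and dmu_tx_commit(). */
        dmu_tx_callback_register(tx, example_commit_cb, es);
}
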
1230 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx) in dmu_tx_sa_registration_hold() argument
1238 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj, in dmu_tx_sa_registration_hold()
1241 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, in dmu_tx_sa_registration_hold()
1248 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object) in dmu_tx_hold_spill() argument
1252 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object, in dmu_tx_hold_spill()
1260 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize) in dmu_tx_hold_sa_create() argument
1262 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa_create()
1264 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); in dmu_tx_hold_sa_create()
1266 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa_create()
1269 if (tx->tx_objset->os_sa->sa_layout_attr_obj) { in dmu_tx_hold_sa_create()
1270 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1272 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa_create()
1273 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa_create()
1274 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1275 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1278 dmu_tx_sa_registration_hold(sa, tx); in dmu_tx_hold_sa_create()
1283 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT, in dmu_tx_hold_sa_create()
1297 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow) in dmu_tx_hold_sa() argument
1300 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa()
1306 dmu_tx_hold_bonus(tx, object); in dmu_tx_hold_sa()
1308 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa()
1311 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 || in dmu_tx_hold_sa()
1312 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) { in dmu_tx_hold_sa()
1313 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa()
1314 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa()
1315 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa()
1316 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa()
1319 dmu_tx_sa_registration_hold(sa, tx); in dmu_tx_hold_sa()
1321 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj) in dmu_tx_hold_sa()
1322 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa()
1325 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_sa()
1326 dmu_tx_hold_spill(tx, object); in dmu_tx_hold_sa()
1334 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_sa()
1335 dmu_tx_hold_spill(tx, object); in dmu_tx_hold_sa()
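
Finally, dmu_tx_hold_sa() and dmu_tx_hold_sa_create() reserve room for
system-attribute updates made through an sa_handle_t. A hedged sketch of the
caller side; pass may_grow = B_TRUE when the update can add attributes or
grow their size, since the listing shows the hold then also covers a
possible SA layout change:

static int
example_update_sa(objset_t *os, sa_handle_t *hdl, boolean_t may_grow)
{
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        /* Holds the bonus buffer (and spill block, when needed). */
        dmu_tx_hold_sa(tx, hdl, may_grow);

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }

        /* sa_update(hdl, <attr>, <buf>, <len>, tx) would go here. */

        dmu_tx_commit(tx);
        return (0);
}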