Lines Matching refs:dn (cross-reference hits for the identifier dn in OpenZFS dmu_tx.c; the number at the start of each hit is its line in that source file, and the trailing "in func()" names the enclosing function)

43 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
96 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type, in dmu_tx_hold_dnode_impl() argument
101 if (dn != NULL) { in dmu_tx_hold_dnode_impl()
102 (void) zfs_refcount_add(&dn->dn_holds, tx); in dmu_tx_hold_dnode_impl()
104 mutex_enter(&dn->dn_mtx); in dmu_tx_hold_dnode_impl()
110 ASSERT(dn->dn_assigned_txg == 0); in dmu_tx_hold_dnode_impl()
111 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_hold_dnode_impl()
112 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_hold_dnode_impl()
113 mutex_exit(&dn->dn_mtx); in dmu_tx_hold_dnode_impl()
119 txh->txh_dnode = dn; in dmu_tx_hold_dnode_impl()
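The hits above show only the dn-touching lines of dmu_tx_hold_dnode_impl(). For context, a sketch of the whole helper, reconstructed from the OpenZFS dmu_tx.c source (the unmatched lines come from that source and may differ slightly across releases):

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		/* Keep the dnode alive for as long as the tx references it. */
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * A dnode may be assigned to at most one open txg
			 * at a time (see dmu_tx_try_assign() below).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}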
134 dnode_t *dn = NULL; in dmu_tx_hold_object_impl() local
139 err = dnode_hold(os, object, FTAG, &dn); in dmu_tx_hold_object_impl()
145 txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2); in dmu_tx_hold_object_impl()
146 if (dn != NULL) in dmu_tx_hold_object_impl()
147 dnode_rele(dn, FTAG); in dmu_tx_hold_object_impl()
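dmu_tx_hold_object_impl() resolves an object number to a dnode and delegates to the helper above; note the dnode_hold()/dnode_rele() bracket around the call. A reconstruction from the same source, hedged the same way:

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	/* DMU_NEW_OBJECT has no dnode yet, so there is nothing to hold. */
	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	/* The helper took its own dn_holds reference, so drop ours. */
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}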
152 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_add_new_object() argument
159 (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0); in dmu_tx_add_new_object()
191 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid) in dmu_tx_check_ioerr() argument
196 rw_enter(&dn->dn_struct_rwlock, RW_READER); in dmu_tx_check_ioerr()
197 db = dbuf_hold_level(dn, level, blkid, FTAG); in dmu_tx_check_ioerr()
198 rw_exit(&dn->dn_struct_rwlock); in dmu_tx_check_ioerr()
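dmu_tx_check_ioerr() pre-reads a block in open context so that a damaged block fails the hold early, rather than failing later in syncing context. The full helper, reconstructed from the same source:

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	dmu_buf_impl_t *db;
	int err;

	/* dn_struct_rwlock protects the dnode's block tree during lookup. */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	/* A failable, non-prefetching read, optionally chained onto zio. */
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}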
210 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_write() local
221 if (dn == NULL) in dmu_tx_count_write()
230 if (dn->dn_maxblkid == 0) { in dmu_tx_count_write()
231 if (off < dn->dn_datablksz && in dmu_tx_count_write()
232 (off > 0 || len < dn->dn_datablksz)) { in dmu_tx_count_write()
233 err = dmu_tx_check_ioerr(NULL, dn, 0, 0); in dmu_tx_count_write()
239 zio_t *zio = zio_root(dn->dn_objset->os_spa, in dmu_tx_count_write()
243 uint64_t start = off >> dn->dn_datablkshift; in dmu_tx_count_write()
244 if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) { in dmu_tx_count_write()
245 err = dmu_tx_check_ioerr(zio, dn, 0, start); in dmu_tx_count_write()
252 uint64_t end = (off + len - 1) >> dn->dn_datablkshift; in dmu_tx_count_write()
253 if (end != start && end <= dn->dn_maxblkid && in dmu_tx_count_write()
254 P2PHASE(off + len, dn->dn_datablksz)) { in dmu_tx_count_write()
255 err = dmu_tx_check_ioerr(zio, dn, 0, end); in dmu_tx_count_write()
262 if (dn->dn_nlevels > 1) { in dmu_tx_count_write()
263 int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT; in dmu_tx_count_write()
266 err = dmu_tx_check_ioerr(zio, dn, 1, i); in dmu_tx_count_write()
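The boundary tests in dmu_tx_count_write() decide which blocks are partial overwrites and therefore must be readable before the write can be counted. A small user-space demonstration of the same arithmetic; the 128K block size and the offsets are made-up numbers for illustration:

#include <stdio.h>
#include <stdint.h>

/* P2PHASE(x, align): x's offset within a power-of-two aligned block. */
#define	P2PHASE(x, align)	((x) & ((align) - 1))

int
main(void)
{
	uint64_t datablksz = 128 * 1024;	/* datablkshift == 17 */
	int datablkshift = 17;
	uint64_t off = 100 * 1024;
	uint64_t len = 200 * 1024;

	uint64_t start = off >> datablkshift;		/* first block: 0 */
	uint64_t end = (off + len - 1) >> datablkshift;	/* last block: 2 */
	int first_partial = P2PHASE(off, datablksz) != 0 || len < datablksz;
	int last_partial = end != start && P2PHASE(off + len, datablksz) != 0;

	/* Prints: start=0 end=2 first_partial=1 last_partial=1 */
	printf("start=%llu end=%llu first_partial=%d last_partial=%d\n",
	    (unsigned long long)start, (unsigned long long)end,
	    first_partial, last_partial);
	return (0);
}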
315 dnode_t *dn = txh->txh_dnode; in dmu_tx_hold_remap_l1indirect() local
317 1ULL << dn->dn_indblkshift, FTAG); in dmu_tx_hold_remap_l1indirect()
322 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len) in dmu_tx_hold_write_by_dnode() argument
330 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len); in dmu_tx_hold_write_by_dnode()
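dmu_tx_hold_write_by_dnode() is the public way to declare a write against a dnode the caller already holds. A minimal consumer-side sketch, assuming an objset_t *os, a held dnode_t *dn, and a data buffer buf; error handling is abbreviated:

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_write_by_dnode(tx, dn, off, len);
	int err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);	/* a tx that was never assigned must be aborted */
		return (err);
	}
	dmu_write_by_dnode(dn, off, len, buf, tx);
	dmu_tx_commit(tx);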
355 dnode_t *dn; in dmu_tx_hold_free_impl() local
361 dn = txh->txh_dnode; in dmu_tx_hold_free_impl()
364 if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz) in dmu_tx_hold_free_impl()
367 len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off; in dmu_tx_hold_free_impl()
378 if (dn->dn_datablkshift == 0) { in dmu_tx_hold_free_impl()
379 if (off != 0 || len < dn->dn_datablksz) in dmu_tx_hold_free_impl()
380 dmu_tx_count_write(txh, 0, dn->dn_datablksz); in dmu_tx_hold_free_impl()
383 if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift)) in dmu_tx_hold_free_impl()
386 if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift)) in dmu_tx_hold_free_impl()
393 if (dn->dn_nlevels > 1) { in dmu_tx_hold_free_impl()
394 int shift = dn->dn_datablkshift + dn->dn_indblkshift - in dmu_tx_hold_free_impl()
399 ASSERT(dn->dn_indblkshift != 0); in dmu_tx_hold_free_impl()
406 if (dn->dn_datablkshift == 0) in dmu_tx_hold_free_impl()
413 err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0); in dmu_tx_hold_free_impl()
424 1 << dn->dn_indblkshift, FTAG); in dmu_tx_hold_free_impl()
426 err = dmu_tx_check_ioerr(zio, dn, 1, i); in dmu_tx_hold_free_impl()
453 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len) in dmu_tx_hold_free_by_dnode() argument
457 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len); in dmu_tx_hold_free_by_dnode()
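dmu_tx_hold_free_by_dnode() is the matching declaration for frees. A sketch of a free-to-end-of-object, assuming os, a held dn, and its object number in object; DMU_OBJECT_END as the length means "through the end of the object":

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_free_by_dnode(tx, dn, off, DMU_OBJECT_END);
	int err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = dmu_free_range(os, object, off, DMU_OBJECT_END, tx);
	dmu_tx_commit(tx);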
466 dnode_t *dn; in dmu_tx_hold_zap_impl() local
471 dn = txh->txh_dnode; in dmu_tx_hold_zap_impl()
487 if (dn == NULL) in dmu_tx_hold_zap_impl()
490 ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP); in dmu_tx_hold_zap_impl()
492 if (dn->dn_maxblkid == 0 || name == NULL) { in dmu_tx_hold_zap_impl()
497 err = dmu_tx_check_ioerr(NULL, dn, 0, 0); in dmu_tx_hold_zap_impl()
507 err = zap_lookup_by_dnode(dn, name, 8, 0, NULL); in dmu_tx_hold_zap_impl()
528 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name) in dmu_tx_hold_zap_by_dnode() argument
533 ASSERT(dn != NULL); in dmu_tx_hold_zap_by_dnode()
535 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name); in dmu_tx_hold_zap_by_dnode()
554 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_hold_bonus_by_dnode() argument
560 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0); in dmu_tx_hold_bonus_by_dnode()
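dmu_tx_hold_zap_by_dnode() and dmu_tx_hold_bonus_by_dnode() declare ZAP and bonus-buffer updates against an already-held dnode. A sketch of adding one ZAP entry; zap_add_by_dnode is, to the best of my reading, the by-dnode ZAP call introduced alongside these holds, so treat it as an assumption:

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_zap_by_dnode(tx, dn, B_TRUE, name);	/* B_TRUE: may add */
	int err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	uint64_t value = 1;
	err = zap_add_by_dnode(dn, name, sizeof (uint64_t), 1, &value, tx);
	dmu_tx_commit(tx);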
585 dnode_t *dn = DB_DNODE(db); in dmu_tx_dirty_buf() local
587 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset); in dmu_tx_dirty_buf()
588 ASSERT3U(dn->dn_object, ==, db->db.db_object); in dmu_tx_dirty_buf()
603 ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg); in dmu_tx_dirty_buf()
604 if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT) in dmu_tx_dirty_buf()
606 if (txh->txh_dnode == NULL || txh->txh_dnode == dn) { in dmu_tx_dirty_buf()
607 int datablkshift = dn->dn_datablkshift ? in dmu_tx_dirty_buf()
608 dn->dn_datablkshift : SPA_MAXBLOCKSHIFT; in dmu_tx_dirty_buf()
609 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; in dmu_tx_dirty_buf()
918 dnode_t *dn = txh->txh_dnode; in dmu_tx_try_assign() local
919 if (dn != NULL) { in dmu_tx_try_assign()
920 mutex_enter(&dn->dn_mtx); in dmu_tx_try_assign()
921 if (dn->dn_assigned_txg == tx->tx_txg - 1) { in dmu_tx_try_assign()
922 mutex_exit(&dn->dn_mtx); in dmu_tx_try_assign()
926 if (dn->dn_assigned_txg == 0) in dmu_tx_try_assign()
927 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_try_assign()
928 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_try_assign()
929 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_try_assign()
930 mutex_exit(&dn->dn_mtx); in dmu_tx_try_assign()
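The tx->tx_txg - 1 test in dmu_tx_try_assign() is the group-commit barrier: if the dnode still carries tx holds from the previous open txg, this tx cannot join the current txg without reordering that dnode's changes, so assignment fails with ERESTART and the blocking hold is remembered for dmu_tx_wait(). A reconstruction of the per-hold loop body, hedged as above (statistics bumps and space accounting are elided):

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				/* Holds from the previous txg must drain first. */
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		/* (per-hold write/hold space accounting elided) */
	}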
966 dnode_t *dn = txh->txh_dnode; in dmu_tx_unassign() local
968 if (dn == NULL) in dmu_tx_unassign()
970 mutex_enter(&dn->dn_mtx); in dmu_tx_unassign()
971 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_unassign()
973 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_unassign()
974 dn->dn_assigned_txg = 0; in dmu_tx_unassign()
975 cv_broadcast(&dn->dn_notxholds); in dmu_tx_unassign()
977 mutex_exit(&dn->dn_mtx); in dmu_tx_unassign()
1081 dnode_t *dn = tx->tx_needassign_txh->txh_dnode; in dmu_tx_wait() local
1083 mutex_enter(&dn->dn_mtx); in dmu_tx_wait()
1084 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) in dmu_tx_wait()
1085 cv_wait(&dn->dn_notxholds, &dn->dn_mtx); in dmu_tx_wait()
1086 mutex_exit(&dn->dn_mtx); in dmu_tx_wait()
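dmu_tx_wait() sleeps on dn_notxholds until the cv_broadcast() in dmu_tx_unassign()/dmu_tx_commit() signals that the blocking dnode's holds have drained. The canonical caller pattern, sketched from the usage documented in the OpenZFS DMU headers (dmu_tx_hold_write stands in for any hold):

retry:
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART) {
			/* Blocked on another txg: wait, then rebuild the tx. */
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto retry;
		}
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... make the declared changes under tx ... */
	dmu_tx_commit(tx);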
1104 dnode_t *dn = txh->txh_dnode; in dmu_tx_destroy() local
1112 if (dn != NULL) in dmu_tx_destroy()
1113 dnode_rele(dn, tx); in dmu_tx_destroy()
1132 dnode_t *dn = txh->txh_dnode; in dmu_tx_commit() local
1134 if (dn == NULL) in dmu_tx_commit()
1137 mutex_enter(&dn->dn_mtx); in dmu_tx_commit()
1138 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_commit()
1140 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_commit()
1141 dn->dn_assigned_txg = 0; in dmu_tx_commit()
1142 cv_broadcast(&dn->dn_notxholds); in dmu_tx_commit()
1144 mutex_exit(&dn->dn_mtx); in dmu_tx_commit()
1329 dnode_t *dn; in dmu_tx_hold_sa() local
1332 dn = DB_DNODE(db); in dmu_tx_hold_sa()
1333 if (dn->dn_have_spill) { in dmu_tx_hold_sa()
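dmu_tx_hold_sa() routes to bonus or spill holds depending on dn_have_spill. A ZPL-flavored usage sketch; zp (a znode), zfsvfs, and the atime update are illustrative assumptions, not part of this listing:

	dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);	/* B_FALSE: SA won't grow */
	int err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	uint64_t atime[2] = { 0, 0 };
	err = sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
	    &atime, sizeof (atime), tx);
	dmu_tx_commit(tx);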