Lines Matching refs:ip (cross-reference hits for the struct ud_inode pointer ip)

100 ud_bmap_has_holes(struct ud_inode *ip)  in ud_bmap_has_holes()  argument
107 ASSERT(RW_LOCK_HELD(&ip->i_contents)); in ud_bmap_has_holes()
110 if (ip->i_desc_type != ICB_FLAG_ONE_AD) { in ud_bmap_has_holes()
111 if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) { in ud_bmap_has_holes()
112 for (i = 0; i < ip->i_ext_used; i++) { in ud_bmap_has_holes()
113 iext = &ip->i_ext[i]; in ud_bmap_has_holes()
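
The ud_bmap_has_holes() lines suggest the routine reads the allocation descriptors up to i_size (ud_read_icb_till_off) and then scans i_ext[] for an unallocated extent. A minimal userland sketch of that scan, assuming a simplified extent record and a hypothetical EXT_UNALLOCATED flag in place of the real icb_ext fields:

/*
 * Sketch only: "struct ext" and EXT_UNALLOCATED are stand-ins for the
 * udfs struct icb_ext and its "unallocated" flag, not the real names.
 */
#include <stdint.h>
#include <stdio.h>

#define EXT_UNALLOCATED 0x01            /* hypothetical hole flag */

struct ext {
    uint64_t off;                       /* ib_offset analogue */
    uint64_t count;                     /* ib_count analogue */
    uint32_t flags;                     /* ib_flags analogue */
};

static int
ext_list_has_holes(const struct ext *list, int32_t used)
{
    for (int32_t i = 0; i < used; i++)
        if (list[i].flags & EXT_UNALLOCATED)
            return (1);
    return (0);
}

int
main(void)
{
    struct ext e[2] = {
        { 0, 4096, 0 },
        { 4096, 4096, EXT_UNALLOCATED },
    };

    printf("has holes: %d\n", ext_list_has_holes(e, 2));
    return (0);
}
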
126 ud_bmap_read(struct ud_inode *ip, u_offset_t off, daddr_t *bnp, int32_t *lenp) in ud_bmap_read() argument
135 ASSERT(RW_LOCK_HELD(&ip->i_contents)); in ud_bmap_read()
137 lbmask = ip->i_udf->udf_lbmask; in ud_bmap_read()
138 l2b = ip->i_udf->udf_l2b_shift; in ud_bmap_read()
139 l2d = ip->i_udf->udf_l2d_shift; in ud_bmap_read()
141 if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) { in ud_bmap_read()
142 for (i = 0; i < ip->i_ext_used; i++) { in ud_bmap_read()
143 iext = &ip->i_ext[i]; in ud_bmap_read()
159 bno = ud_xlate_to_daddr(ip->i_udf, in ud_bmap_read()
169 if (i == ip->i_ext_used) { in ud_bmap_read()
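
ud_bmap_read() walks the same extent list to turn a file offset into a device block and remaining run length, translating through ud_xlate_to_daddr() and failing once the scan runs past i_ext_used. A sketch of that lookup under simplified assumptions (one flat block number per extent; the real translation also involves the partition reference number and udf_l2d_shift):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct ext {                            /* icb_ext stand-in */
    uint64_t off;                       /* file offset (ib_offset) */
    uint64_t count;                     /* byte length (ib_count) */
    uint64_t block;                     /* starting block (ib_block) */
};

static int
bmap_read_sketch(const struct ext *list, int32_t used, int l2b,
    uint64_t off, uint64_t *bnop, uint64_t *lenp)
{
    for (int32_t i = 0; i < used; i++) {
        const struct ext *e = &list[i];

        if (off >= e->off && off < e->off + e->count) {
            uint64_t skip = off - e->off;

            /* real code translates via ud_xlate_to_daddr() here */
            *bnop = e->block + (skip >> l2b);
            *lenp = e->count - skip;
            return (0);
        }
    }
    return (EINVAL);                    /* offset beyond mapped extents */
}

int
main(void)
{
    struct ext e[1] = { { 0, 8192, 100 } };
    uint64_t bno, len;

    if (bmap_read_sketch(e, 1, 11, 4096, &bno, &len) == 0)
        printf("bno %llu len %llu\n",
            (unsigned long long)bno, (unsigned long long)len);
    return (0);
}
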
194 ud_bmap_write(struct ud_inode *ip, in ud_bmap_write() argument
212 ASSERT(RW_WRITE_HELD(&ip->i_contents)); in ud_bmap_write()
214 udf_vfsp = ip->i_udf; in ud_bmap_write()
223 issync = ((ip->i_flag & ISYNC) != 0); in ud_bmap_write()
225 isdir = (ip->i_type == VDIR); in ud_bmap_write()
231 if (ip->i_desc_type == ICB_FLAG_ONE_AD) { in ud_bmap_write()
232 if (end_req < ip->i_max_emb) { in ud_bmap_write()
236 if (ip->i_size != 0) { in ud_bmap_write()
237 error = fbread(ITOV(ip), 0, ip->i_size, S_OTHER, &fbp); in ud_bmap_write()
247 ip->i_desc_type = ICB_FLAG_SHORT_AD; in ud_bmap_write()
251 ASSERT(ip->i_ext == NULL); in ud_bmap_write()
252 ASSERT(ip->i_astrat == STRAT_TYPE4); in ud_bmap_write()
254 ip->i_ext_used = 0; in ud_bmap_write()
255 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad); in ud_bmap_write()
256 ip->i_cur_max_ext --; in ud_bmap_write()
262 ip->i_ext_count = in ud_bmap_write()
264 iext = ip->i_ext = (struct icb_ext *)kmem_zalloc( in ud_bmap_write()
265 ip->i_ext_count * sizeof (struct icb_ext), KM_SLEEP); in ud_bmap_write()
276 if ((PCEIL(ip->i_size) < PBASE(off)) && in ud_bmap_write()
277 ((PBASE(off) - PCEIL(ip->i_size)) >= PAGESIZE)) { in ud_bmap_write()
279 if (ip->i_size != 0) { in ud_bmap_write()
287 if (error = ud_create_ext(ip, ip->i_ext_used, in ud_bmap_write()
299 count = PBASE(off) - PCEIL(ip->i_size); in ud_bmap_write()
300 (void) ud_create_ext(ip, ip->i_ext_used, NEW_EXT, in ud_bmap_write()
318 if (error = ud_create_ext(ip, ip->i_ext_used, in ud_bmap_write()
329 ip->i_desc_type = ICB_FLAG_ONE_AD; in ud_bmap_write()
331 for (i = 0; i < ip->i_ext_used; i++) { in ud_bmap_write()
332 iext = &ip->i_ext[i]; in ud_bmap_write()
334 ud_free_space(ip->i_udf->udf_vfs, in ud_bmap_write()
341 kmem_free(ip->i_ext, in ud_bmap_write()
342 ip->i_ext_count * in ud_bmap_write()
344 ip->i_ext = NULL; in ud_bmap_write()
345 ip->i_ext_count = ip->i_ext_used = 0; in ud_bmap_write()
359 if (ip->i_ext == NULL) { in ud_bmap_write()
366 if (ud_read_icb_till_off(ip, ip->i_size) != 0) { in ud_bmap_write()
371 isize = CEIL(ip->i_size); in ud_bmap_write()
380 if (ip->i_ext == NULL) { in ud_bmap_write()
382 } else if (ip->i_ext_used == 0) { in ud_bmap_write()
386 error = ud_last_alloc_ext(ip, off, size, alloc_only); in ud_bmap_write()
396 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_bmap_write()
412 ASSERT(ip->i_ext); in ud_bmap_write()
417 for (i = 0; i < ip->i_ext_used; i++) { in ud_bmap_write()
418 iext = &ip->i_ext[i]; in ud_bmap_write()
431 iext = &ip->i_ext[i]; in ud_bmap_write()
462 ip, i, BASE(iext->ib_offset) - in ud_bmap_write()
473 if ((error = ud_break_create_new_icb(ip, i, in ud_bmap_write()
477 iext = &ip->i_ext[i]; in ud_bmap_write()
491 if ((error = ud_break_create_new_icb(ip, i, in ud_bmap_write()
511 pext = &ip->i_ext[i - 1]; in ud_bmap_write()
518 iext = &ip->i_ext[i]; in ud_bmap_write()
521 if ((error = ud_alloc_space(ip->i_vfs, in ud_bmap_write()
522 ip->i_icb_prn, prox, blkcount, in ud_bmap_write()
526 ip->i_lbr += sz; in ud_bmap_write()
532 error = ud_zero_it(ip, blkno, sz); in ud_bmap_write()
547 pext = &ip->i_ext[i - 1]; in ud_bmap_write()
558 ud_remove_ext_at_index(ip, i); in ud_bmap_write()
572 ip, i, sz << l2b)) != 0) { in ud_bmap_write()
576 iext = &ip->i_ext[i]; in ud_bmap_write()
578 iext->ib_prn = ip->i_icb_prn; in ud_bmap_write()
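
The early ud_bmap_write() lines capture the embedded-data decision: while off + size (end_req) stays below i_max_emb the data remains embedded in the ICB (ICB_FLAG_ONE_AD); otherwise the existing bytes are read back with fbread(), the descriptor type becomes ICB_FLAG_SHORT_AD, i_cur_max_ext is set to how many short_ad descriptors fit in the embedded area (minus one), and an i_ext array is allocated in EXT_PER_MALLOC chunks. A compressed sketch of that conversion; the sizes and field names below are stand-ins, not the real on-disk values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_EMB         1872    /* hypothetical i_max_emb value */
#define SHORT_AD_SZ     8       /* assumed sizeof (struct short_ad) */
#define EXT_PER_MALLOC  8

struct ext { uint64_t off, count, block; };   /* icb_ext stand-in */

struct inode_sketch {
    int embedded;                             /* ICB_FLAG_ONE_AD analogue */
    struct ext *ext;                          /* i_ext analogue */
    int32_t ext_count;                        /* i_ext_count analogue */
    int32_t cur_max_ext;                      /* i_cur_max_ext analogue */
};

static int
prepare_write(struct inode_sketch *ip, uint64_t off, uint64_t size)
{
    uint64_t end_req = off + size;

    /* request still fits in the space embedded in the ICB */
    if (ip->embedded && end_req < MAX_EMB)
        return (0);

    if (ip->embedded) {
        /*
         * Convert to short_ad extents; the real code first copies
         * the embedded bytes out with fbread().
         */
        ip->embedded = 0;
        ip->cur_max_ext = MAX_EMB / SHORT_AD_SZ - 1;
        ip->ext_count = EXT_PER_MALLOC;
        ip->ext = calloc(ip->ext_count, sizeof (struct ext));
        if (ip->ext == NULL)
            return (-1);
    }
    return (0);
}

int
main(void)
{
    struct inode_sketch ip = { .embedded = 1 };

    (void) prepare_write(&ip, 0, 64 * 1024);
    printf("embedded now %d, cur_max_ext %d\n", ip.embedded, ip.cur_max_ext);
    free(ip.ext);
    return (0);
}
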
600 ud_common_ad(struct ud_inode *ip, struct buf *bp) in ud_common_ad() argument
615 if (ip->i_desc_type == ICB_FLAG_LONG_AD) { in ud_common_ad()
619 } else if (ip->i_desc_type == ICB_FLAG_SHORT_AD) { in ud_common_ad()
629 count = (((ip->i_ext_used + ndesc) / EXT_PER_MALLOC) + 1) * in ud_common_ad()
632 bcopy(ip->i_ext, addr, ip->i_ext_used * sizeof (struct icb_ext)); in ud_common_ad()
633 kmem_free(ip->i_ext, ip->i_ext_count * sizeof (struct icb_ext)); in ud_common_ad()
634 ip->i_ext = addr; in ud_common_ad()
635 ip->i_ext_count = count; in ud_common_ad()
640 lbmask = ip->i_udf->udf_lbmask; in ud_common_ad()
641 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_common_ad()
653 if (ip->i_con_used == ip->i_con_count) { in ud_common_ad()
657 old = ip->i_con; in ud_common_ad()
658 old_count = ip->i_con_count * in ud_common_ad()
660 ip->i_con_count += EXT_PER_MALLOC; in ud_common_ad()
661 ip->i_con = kmem_zalloc(ip->i_con_count * in ud_common_ad()
665 bcopy(old, ip->i_con, old_count); in ud_common_ad()
669 con = &ip->i_con[ip->i_con_used]; in ud_common_ad()
674 con->ib_prn = ip->i_icb_prn; in ud_common_ad()
679 ip->i_con_used++; in ud_common_ad()
698 ip->i_ext_used++; in ud_common_ad()
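
ud_common_ad() parses allocation descriptors out of a buffer and grows both the in-core extent array (i_ext) and the continuation list (i_con) in EXT_PER_MALLOC-sized steps: allocate a larger zeroed array, copy the old entries over, free the old array. A userland analogue of that grow step, with calloc/memcpy/free standing in for kmem_zalloc/bcopy/kmem_free and a simplified extent record:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXT_PER_MALLOC  8

struct ext { uint64_t off, count, block; };   /* icb_ext stand-in */

/*
 * Round the needed capacity up to the next EXT_PER_MALLOC multiple and
 * move the old entries over, the pattern shown at the top of
 * ud_common_ad().
 */
static int
grow_ext_array(struct ext **extp, int32_t *countp, int32_t used,
    int32_t ndesc)
{
    int32_t count = (((used + ndesc) / EXT_PER_MALLOC) + 1) *
        EXT_PER_MALLOC;
    struct ext *addr = calloc(count, sizeof (struct ext));

    if (addr == NULL)
        return (-1);
    memcpy(addr, *extp, used * sizeof (struct ext));
    free(*extp);
    *extp = addr;
    *countp = count;
    return (0);
}

int
main(void)
{
    struct ext *list = calloc(EXT_PER_MALLOC, sizeof (struct ext));
    int32_t count = EXT_PER_MALLOC;

    if (grow_ext_array(&list, &count, 7, 5) == 0)
        printf("new capacity %d\n", count);
    free(list);
    return (0);
}
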
705 ud_read_next_cont(struct ud_inode *ip) in ud_read_next_cont() argument
713 cont = &ip->i_con[ip->i_con_read]; in ud_read_next_cont()
716 bno = ud_xlate_to_daddr(ip->i_udf, cont->ib_prn, cont->ib_block, in ud_read_next_cont()
718 bp = ud_bread(ip->i_dev, bno << ip->i_udf->udf_l2d_shift, in ud_read_next_cont()
730 ud_common_ad(ip, bp); in ud_read_next_cont()
738 ud_read_icb_till_off(struct ud_inode *ip, u_offset_t offset) in ud_read_icb_till_off() argument
745 if (ip->i_desc_type == ICB_FLAG_ONE_AD) in ud_read_icb_till_off()
747 else if ((ip->i_astrat != STRAT_TYPE4) && in ud_read_icb_till_off()
748 (ip->i_astrat != STRAT_TYPE4096)) in ud_read_icb_till_off()
750 else if (ip->i_ext_used == 0) in ud_read_icb_till_off()
751 return ((ip->i_size == 0) ? 0 : EINVAL); in ud_read_icb_till_off()
758 mutex_enter(&ip->i_con_lock); in ud_read_icb_till_off()
759 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_read_icb_till_off()
761 if (ip->i_con_used == ip->i_con_read) { in ud_read_icb_till_off()
765 if (error = ud_read_next_cont(ip)) in ud_read_icb_till_off()
767 ip->i_con_read++; in ud_read_icb_till_off()
768 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_read_icb_till_off()
770 mutex_exit(&ip->i_con_lock); in ud_read_icb_till_off()
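
ud_read_icb_till_off() keeps pulling in continuation blocks, under i_con_lock, until the last in-core extent covers the requested offset: if every continuation already parsed (i_con_read == i_con_used) still leaves the offset uncovered it fails, otherwise ud_read_next_cont() is called, i_con_read advances, and the new last extent is re-checked. A lock-free sketch of that loop shape, with a caller-supplied read_next callback standing in for ud_read_next_cont() (callers are assumed to have verified i_ext_used != 0, as the real routine does):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t off, count; };          /* icb_ext stand-in */

struct icb_state {
    struct ext *ext;                          /* i_ext analogue */
    int32_t ext_used;                         /* i_ext_used analogue */
    int32_t con_used;                         /* continuations found */
    int32_t con_read;                         /* continuations parsed */
    /* parses one continuation, may grow ext/ext_used */
    int (*read_next)(struct icb_state *);
};

static int
read_icb_till_off(struct icb_state *st, uint64_t offset)
{
    struct ext *last = &st->ext[st->ext_used - 1];

    while (last->off + last->count < offset) {
        if (st->con_read == st->con_used)
            return (EINVAL);                  /* nothing left to read */
        if (st->read_next(st) != 0)
            return (EIO);
        st->con_read++;
        last = &st->ext[st->ext_used - 1];
    }
    return (0);
}

/* trivial read_next: pretend one continuation adds a second extent */
static struct ext demo_ext[2] = { { 0, 4096 }, { 4096, 4096 } };

static int
fake_read_next(struct icb_state *st)
{
    st->ext_used = 2;
    return (0);
}

int
main(void)
{
    struct icb_state st = {
        .ext = demo_ext, .ext_used = 1,
        .con_used = 1, .con_read = 0,
        .read_next = fake_read_next,
    };

    printf("result %d\n", read_icb_till_off(&st, 8000));
    return (0);
}
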
781 ud_last_alloc_ext(struct ud_inode *ip, uint64_t off, in ud_last_alloc_ext() argument
792 udf_vfsp = ip->i_udf; in ud_last_alloc_ext()
806 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_last_alloc_ext()
830 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_last_alloc_ext()
841 error = ud_create_ext(ip, ip->i_ext_used, in ud_last_alloc_ext()
853 error = ud_create_ext(ip, ip->i_ext_used - 1, in ud_last_alloc_ext()
863 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_last_alloc_ext()
874 (void) ud_create_ext(ip, ip->i_ext_used - 1, in ud_last_alloc_ext()
884 (void) ud_create_ext(ip, ip->i_ext_used, in ud_last_alloc_ext()
897 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_last_alloc_ext()
903 iext = &ip->i_ext[ip->i_ext_used - 1]; in ud_last_alloc_ext()
914 error = ud_create_ext(ip, ip->i_ext_used, in ud_last_alloc_ext()
923 error = ud_create_ext(ip, ip->i_ext_used - 1, in ud_last_alloc_ext()
937 ud_break_create_new_icb(struct ud_inode *ip, in ud_break_create_new_icb() argument
945 iext = &ip->i_ext[index]; in ud_break_create_new_icb()
949 if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) { in ud_break_create_new_icb()
953 for (i = ip->i_ext_used; i > index; i--) { in ud_break_create_new_icb()
954 ip->i_ext[i] = ip->i_ext[i - 1]; in ud_break_create_new_icb()
957 next = &ip->i_ext[index + 1]; in ud_break_create_new_icb()
958 iext = &ip->i_ext[index]; in ud_break_create_new_icb()
965 iext->ib_count >> ip->i_udf->udf_l2b_shift; in ud_break_create_new_icb()
967 ip->i_ext_used++; in ud_break_create_new_icb()
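
ud_break_create_new_icb() splits the extent at index into two pieces: after ud_bump_ext_count() guarantees room, the entries above index are shifted up by one and the original offset/length is divided between the old slot and the new one, with i_ext_used incremented. A sketch of that split on a simplified array (2 KB logical blocks assumed for the block-number adjustment, and capacity growth assumed already done):

#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t off, count, block; };   /* icb_ext stand-in */

/*
 * Split list[index] so the first piece keeps new_count bytes and a new
 * entry at index + 1 holds the remainder.  Assumes room for one more
 * entry already exists (the real code calls ud_bump_ext_count()).
 */
static void
break_ext(struct ext *list, int32_t *usedp, int32_t index,
    uint64_t new_count)
{
    for (int32_t i = *usedp; i > index; i--)
        list[i] = list[i - 1];

    struct ext *cur = &list[index];
    struct ext *next = &list[index + 1];

    next->off = cur->off + new_count;
    next->count = cur->count - new_count;
    next->block = cur->block + (new_count >> 11);  /* 2 KB blocks assumed */
    cur->count = new_count;
    (*usedp)++;
}

int
main(void)
{
    struct ext list[4] = { { 0, 16384, 100 } };
    int32_t used = 1;

    break_ext(list, &used, 0, 4096);
    printf("[0] %llu+%llu  [1] %llu+%llu\n",
        (unsigned long long)list[0].off, (unsigned long long)list[0].count,
        (unsigned long long)list[1].off, (unsigned long long)list[1].count);
    return (0);
}
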
972 ud_remove_ext_at_index(struct ud_inode *ip, int32_t index) in ud_remove_ext_at_index() argument
976 ASSERT(index <= ip->i_ext_used); in ud_remove_ext_at_index()
978 for (i = index; i < ip->i_ext_used; i++) { in ud_remove_ext_at_index()
979 if ((i + 1) < ip->i_ext_count) { in ud_remove_ext_at_index()
980 ip->i_ext[i] = ip->i_ext[i + 1]; in ud_remove_ext_at_index()
982 bzero(&ip->i_ext[i], sizeof (struct icb_ext)); in ud_remove_ext_at_index()
985 ip->i_ext_used --; in ud_remove_ext_at_index()
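
ud_remove_ext_at_index() is the inverse: entries above index slide down by one, the vacated tail slot is zeroed, and i_ext_used is decremented. The same pattern on the simplified array:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ext { uint64_t off, count, block; };   /* icb_ext stand-in */

static void
remove_ext_at_index(struct ext *list, int32_t count, int32_t *usedp,
    int32_t index)
{
    for (int32_t i = index; i < *usedp; i++) {
        if (i + 1 < count)
            list[i] = list[i + 1];
        else
            memset(&list[i], 0, sizeof (struct ext));
    }
    (*usedp)--;
}

int
main(void)
{
    struct ext list[3] = {
        { 0, 4096, 1 }, { 4096, 4096, 2 }, { 8192, 4096, 3 },
    };
    int32_t used = 3;

    remove_ext_at_index(list, 3, &used, 1);
    printf("used %d, [1].block %llu\n", used,
        (unsigned long long)list[1].block);
    return (0);
}
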
989 ud_bump_ext_count(struct ud_inode *ip, int32_t sleep_flag) in ud_bump_ext_count() argument
995 ASSERT(ip); in ud_bump_ext_count()
1000 if (ip->i_ext_used >= ip->i_ext_count) { in ud_bump_ext_count()
1002 old_count = sizeof (struct icb_ext) * ip->i_ext_count; in ud_bump_ext_count()
1003 ip->i_ext_count += EXT_PER_MALLOC; in ud_bump_ext_count()
1005 ip->i_ext_count, sleep_flag); in ud_bump_ext_count()
1006 bcopy(ip->i_ext, iext, old_count); in ud_bump_ext_count()
1007 kmem_free(ip->i_ext, old_count); in ud_bump_ext_count()
1008 ip->i_ext = iext; in ud_bump_ext_count()
1011 if (ip->i_ext_used >= ip->i_cur_max_ext) { in ud_bump_ext_count()
1017 lbmask = ip->i_udf->udf_lbmask; in ud_bump_ext_count()
1018 l2b = ip->i_udf->udf_l2b_shift; in ud_bump_ext_count()
1020 if ((error = ud_read_icb_till_off(ip, ip->i_size)) != 0) { in ud_bump_ext_count()
1028 if (ip->i_con_used != 0) { in ud_bump_ext_count()
1029 icon = &ip->i_con[ip->i_con_used - 1]; in ud_bump_ext_count()
1038 if ((error = ud_alloc_space(ip->i_vfs, ip->i_icb_prn, in ud_bump_ext_count()
1048 if (ip->i_con_used == ip->i_con_count) { in ud_bump_ext_count()
1052 old = ip->i_con; in ud_bump_ext_count()
1053 old_count = ip->i_con_count * in ud_bump_ext_count()
1055 ip->i_con_count += EXT_PER_MALLOC; in ud_bump_ext_count()
1056 ip->i_con = kmem_zalloc(ip->i_con_count * in ud_bump_ext_count()
1059 bcopy(old, ip->i_con, old_count); in ud_bump_ext_count()
1063 icon = &ip->i_con[ip->i_con_used++]; in ud_bump_ext_count()
1065 icon->ib_prn = ip->i_icb_prn; in ud_bump_ext_count()
1076 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) { in ud_bump_ext_count()
1078 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) { in ud_bump_ext_count()
1084 ip->i_cur_max_ext += sz / elen; in ud_bump_ext_count()
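
ud_bump_ext_count() has two jobs: grow the in-core i_ext array when i_ext_used reaches i_ext_count (the same chunked grow shown after ud_common_ad() above), and, when the extent count reaches i_cur_max_ext, allocate a fresh continuation block with ud_alloc_space() and raise i_cur_max_ext by how many allocation descriptors fit in it (short_ad or long_ad depending on i_desc_type). A sketch of just that capacity arithmetic; the descriptor sizes are assumptions, and the space a continuation-extent tail descriptor consumes is ignored:

#include <stdint.h>
#include <stdio.h>

#define SHORT_AD_SZ 8                   /* assumed sizeof (struct short_ad) */
#define LONG_AD_SZ  16                  /* assumed sizeof (struct long_ad) */

/*
 * After allocating one continuation block of blksize bytes, bump the
 * count of descriptors that fit on disk by blksize / descriptor-size.
 */
static int32_t
bump_cur_max_ext(int32_t cur_max_ext, uint32_t blksize, int use_long_ad)
{
    uint32_t elen = use_long_ad ? LONG_AD_SZ : SHORT_AD_SZ;

    return (cur_max_ext + (int32_t)(blksize / elen));
}

int
main(void)
{
    /* 2 KB logical block, short_ad descriptors */
    printf("new cur_max_ext %d\n", bump_cur_max_ext(200, 2048, 0));
    return (0);
}
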
1090 ud_create_ext(struct ud_inode *ip, int32_t index, uint32_t flags, in ud_create_ext() argument
1104 udf_vfsp = ip->i_udf; in ud_create_ext()
1109 if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) { in ud_create_ext()
1113 iext = &ip->i_ext[index]; in ud_create_ext()
1116 (ip->i_ext_count == 0)) { in ud_create_ext()
1119 iext->ib_prn = ip->i_icb_prn; in ud_create_ext()
1125 if ((error = ud_alloc_space(ip->i_vfs, in ud_create_ext()
1126 ip->i_icb_prn, 0, blkcount, in ud_create_ext()
1133 ip->i_lbr += sz; in ud_create_ext()
1144 if (ip->i_ext_used <= index) in ud_create_ext()
1145 ip->i_ext_used ++; in ud_create_ext()
1158 if ((error = ud_alloc_space(ip->i_vfs, in ud_create_ext()
1159 ip->i_icb_prn, prox, blkcount, in ud_create_ext()
1173 ip->i_lbr += sz; in ud_create_ext()
1177 if ((error = ud_bump_ext_count(ip, KM_SLEEP)) in ud_create_ext()
1181 pext = &ip->i_ext[index]; in ud_create_ext()
1182 iext = &ip->i_ext[index + 1]; in ud_create_ext()
1184 iext->ib_prn = ip->i_icb_prn; in ud_create_ext()
1194 if (ip->i_ext_used <= index) in ud_create_ext()
1195 ip->i_ext_used ++; in ud_create_ext()
1199 error = ud_zero_it(ip, blkno, sz); in ud_create_ext()
1220 if (ip->i_ext_used <= index) in ud_create_ext()
1221 ip->i_ext_used ++; in ud_create_ext()
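
ud_create_ext() fills in the extent at index, either allocating blkcount blocks with ud_alloc_space() (updating i_lbr and optionally zeroing via ud_zero_it()) or recording an unallocated extent, and bumps i_ext_used when index is at the end of the list. A sketch of the bookkeeping around a successful allocation, with the allocator reduced to a stub that hands out sequential block numbers:

#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t off, count, block; };   /* icb_ext stand-in */

static uint64_t next_free = 1000;             /* stub allocator state */

/* stand-in for ud_alloc_space(): always succeeds, sequential blocks */
static int
alloc_space(uint32_t blkcount, uint64_t *blknop)
{
    *blknop = next_free;
    next_free += blkcount;
    return (0);
}

/*
 * Create the extent at "index" covering "size" bytes at file offset
 * "off"; l2b is the log2 of the logical block size.
 */
static int
create_ext(struct ext *list, int32_t *usedp, int32_t index,
    uint64_t off, uint64_t size, int l2b)
{
    uint32_t lbmask = (1U << l2b) - 1;
    uint32_t blkcount = (uint32_t)((size + lbmask) >> l2b);
    uint64_t blkno;

    if (alloc_space(blkcount, &blkno) != 0)
        return (-1);

    list[index].off = off;
    list[index].count = size;
    list[index].block = blkno;
    if (*usedp <= index)
        (*usedp)++;
    return (0);
}

int
main(void)
{
    struct ext list[4];
    int32_t used = 0;

    (void) create_ext(list, &used, 0, 0, 6000, 11);
    printf("used %d, block %llu, count %llu\n", used,
        (unsigned long long)list[0].block,
        (unsigned long long)list[0].count);
    return (0);
}
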
1249 ud_zero_it(struct ud_inode *ip, uint32_t start_block, uint32_t block_count) in ud_zero_it() argument
1263 udf_vfsp = ip->i_udf; in ud_zero_it()
1265 ip->i_icb_prn, start_block, block_count, &dummy); in ud_zero_it()
1273 bp->b_edev = ip->i_dev; in ud_zero_it()
1274 bp->b_dev = cmpdev(ip->i_dev); in ud_zero_it()
1278 bp->b_file = ip->i_vnode; in ud_zero_it()
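
ud_zero_it() zeroes a freshly allocated run before it becomes visible: (i_icb_prn, start_block) is translated to a device address and a zero-filled buf, tied to i_dev and i_vnode, is written over block_count blocks. A userland analogue using pwrite() on an ordinary file descriptor, with the device translation reduced to a plain byte offset:

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Write block_count zeroed blocks of (1 << l2b) bytes starting at
 * start_block.  A stand-in for the buf-based zeroing in ud_zero_it().
 */
static int
zero_blocks(int fd, uint32_t start_block, uint32_t block_count, int l2b)
{
    size_t len = (size_t)block_count << l2b;
    char *buf = calloc(1, len);
    ssize_t done;

    if (buf == NULL)
        return (-1);
    done = pwrite(fd, buf, len, (off_t)start_block << l2b);
    free(buf);
    return (done == (ssize_t)len ? 0 : -1);
}

int
main(void)
{
    int fd = open("zero_demo.img", O_RDWR | O_CREAT | O_TRUNC, 0600);

    if (fd >= 0) {
        (void) zero_blocks(fd, 4, 2, 11);
        (void) close(fd);
    }
    return (0);
}
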